"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # A list of lists of frames is already a batch of videos.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    # A flat list of frames is a single video.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    # A single image is treated as a one-frame video.
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
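# For example (a sketch): a single frame becomes [[frame]], a flat list of frames becomes
# [frames], and an already batched list of lists of frames is returned unchanged.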
class snake_case(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self : Dict ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : List[Any] ,):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = do_center_crop
UpperCAmelCase__ = crop_size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_factor
UpperCAmelCase__ = offset
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[str] ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
if "shortest_edge" in size:
UpperCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ ,size['shortest_edge'] ,default_to_square=lowerCamelCase__ )
elif "height" in size and "width" in size:
UpperCAmelCase__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
UpperCAmelCase__ = image.astype(np.float32 )
if offset:
UpperCAmelCase__ = image - (scale / 2)
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,):
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = to_numpy_array(lowerCamelCase__ )
if do_resize:
UpperCAmelCase__ = self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ )
if do_center_crop:
UpperCAmelCase__ = self.center_crop(lowerCamelCase__ ,size=lowerCamelCase__ )
if do_rescale:
UpperCAmelCase__ = self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ,offset=lowerCamelCase__ )
if do_normalize:
UpperCAmelCase__ = self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ )
UpperCAmelCase__ = to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ )
return image
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : Dict ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = offset if offset is not None else self.offset
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCAmelCase__ = make_batched(lowerCamelCase__ )
UpperCAmelCase__ = [
[
self._preprocess_image(
image=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,do_center_crop=lowerCamelCase__ ,crop_size=lowerCamelCase__ ,do_rescale=lowerCamelCase__ ,rescale_factor=lowerCamelCase__ ,offset=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,image_mean=lowerCamelCase__ ,image_std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,)
for img in video
]
for video in videos
]
UpperCAmelCase__ = {'pixel_values': videos}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by a given level."""

    def brightness(c: int) -> float:
        # Shift every pixel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
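# A minimal self-check of change_brightness (a sketch: the 4x4 solid-grey image below is a
# hypothetical stand-in for the lena.jpg file used above).
_grey = Image.new("L", (4, 4), color=100)
assert list(change_brightness(_grey, 50.0).getdata()) == [150] * 16  # every pixel shifted up by the level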
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
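# The deprecation message above points callers to the supported import path, e.g.:
#   from diffusers import StableDiffusionControlNetPipeline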
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of the arc spanning `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
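# Quick self-check (a sketch): a 90 degree arc on a circle of radius 10 is a quarter of the
# circumference, i.e. 2 * pi * 10 / 4 = 5 * pi.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9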
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RetriBERT model."""

    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
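# A minimal usage sketch (the defaults mirror the values above; any field can be overridden):
#   config = RetriBertConfig(projection_dim=256)
#   config.model_type      # "retribert"
#   config.projection_dim  # 256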
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
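# Worked example: 220 and 284 form the smallest amicable pair, since sum_of_divisors(220) == 284
# and sum_of_divisors(284) == 220, so solution(300) returns 220 + 284 = 504.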
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowerCamelCase ( __lowerCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 42
class __lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , UpperCAmelCase = 65536 , UpperCAmelCase = None , UpperCAmelCase = 2 , UpperCAmelCase = 2 , UpperCAmelCase = 0 , UpperCAmelCase = "fourier" , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = 0.0 , UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase = "UNetMidBlock1D" , UpperCAmelCase = None , UpperCAmelCase = (32, 32, 64) , UpperCAmelCase = None , UpperCAmelCase = 8 , UpperCAmelCase = 1 , UpperCAmelCase = False , ) -> int:
'''simple docstring'''
super().__init__()
lowercase_ = sample_size
# time
if time_embedding_type == "fourier":
lowercase_ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
lowercase_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase_ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
lowercase_ = block_out_channels[0]
if use_timestep_embedding:
lowercase_ = block_out_channels[0] * 4
lowercase_ = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
lowercase_ = nn.ModuleList([] )
lowercase_ = None
lowercase_ = nn.ModuleList([] )
lowercase_ = None
# down
lowercase_ = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
lowercase_ = output_channel
lowercase_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase_ = i == len(lowerCamelCase__ ) - 1
lowercase_ = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
lowercase_ = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
lowercase_ = list(reversed(lowerCamelCase__ ) )
lowercase_ = reversed_block_out_channels[0]
if out_block_type is None:
lowercase_ = out_channels
else:
lowercase_ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
lowercase_ = output_channel
lowercase_ = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
lowercase_ = i == len(lowerCamelCase__ ) - 1
lowercase_ = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
lowercase_ = output_channel
# out
lowercase_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase_ = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
lowercase_ = timestep
if not torch.is_tensor(lowerCamelCase__ ):
lowercase_ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
lowercase_ = timesteps[None].to(sample.device )
lowercase_ = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
lowercase_ = self.time_mlp(lowerCamelCase__ )
else:
lowercase_ = timestep_embed[..., None]
lowercase_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase_ = ()
for downsample_block in self.down_blocks:
lowercase_ = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase_ = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase_ = down_block_res_samples[-1:]
lowercase_ = down_block_res_samples[:-1]
lowercase_ = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
lowercase_ = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 1
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.ones([0] )
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase )
return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
__lowerCamelCase = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
__lowerCamelCase = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
__lowerCamelCase = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
__lowerCamelCase = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
__lowerCamelCase = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
__lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__lowerCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Any = FLAX_MODEL_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModel)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : str = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__lowerCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class UpperCAmelCase ( _BaseAutoModelClass ):
A__ : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__lowerCamelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
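# A minimal usage sketch, assuming the auto classes above are exposed under their usual
# transformers names (the placeholder class names in this file obscure that):
#   from transformers import FlaxAutoModelForSequenceClassification
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")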
lowercase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def bamb(x) -> int:
    # Convert a number of bytes into whole mebibytes.
    return int(x / 2**20)
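# For example, bamb(5 * 2**20) == 5; the tracker below uses it to report GPU memory deltas in MB.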
class TorchTracemalloc:
    """Context manager that records the CUDA memory allocated and peaked inside its block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def _snake_case ( A , A ) -> str:
# Initialize accelerator
lowerCAmelCase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config['''lr''']
lowerCAmelCase__ = int(config['''num_epochs'''] )
lowerCAmelCase__ = int(config['''seed'''] )
lowerCAmelCase__ = int(config['''batch_size'''] )
lowerCAmelCase__ = args.model_name_or_path
set_seed(A )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(A , A , A , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowerCAmelCase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase__ = 1
lowerCAmelCase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowerCAmelCase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase__ = 0
# Now we train the model
lowerCAmelCase__ = {}
for epoch in range(A , A ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(A ):
lowerCAmelCase__ = model(**A )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase__ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(A , A )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", required=False, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
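# Example invocation (script name, runner names and token are hypothetical):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>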
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2", "stage3"] , _lowerCamelCase=[1, 2, 3] , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Dict = embed_dim
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : Dict = num_heads
UpperCAmelCase__ : Any = window_size
UpperCAmelCase__ : str = mlp_ratio
UpperCAmelCase__ : str = qkv_bias
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = drop_path_rate
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Union[str, Any] = use_absolute_embeddings
UpperCAmelCase__ : Optional[int] = patch_norm
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Tuple = encoder_stride
UpperCAmelCase__ : Optional[int] = out_features
UpperCAmelCase__ : str = out_indices
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Dict = model(_lowerCamelCase )
UpperCAmelCase__ : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Union[str, Any] = ["""stem"""]
UpperCAmelCase__ : List[Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = MaskFormerSwinModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
"""simple docstring"""
return
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCAmelCase__ : str = outputs.hidden_states
UpperCAmelCase__ : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
UpperCAmelCase__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Tuple = 3
UpperCAmelCase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            # zero out NaN entries so that the allclose comparison below is well defined
            t[t != t] = 0
            return t
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has"""
F""" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}."""
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
UpperCAmelCase__ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = MaskFormerSwinModelTester(self )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
UpperCAmelCase__ : int = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
| 171
| 0
|
'''simple docstring'''
import math
import sys
def lowerCamelCase (number : int ):
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
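# Illustrative values for the dynamic programme above, which returns the fewest perfect
# squares that sum to `number` (worked by hand, not taken from the original file):
#   12 -> 3  (4 + 4 + 4)
#   13 -> 2  (9 + 4)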
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
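# Rough behaviour of the check above (assuming PyTorch >= 2.0, where torch.compile wraps a
# module in torch._dynamo.eval_frame.OptimizedModule; `my_model` is a hypothetical nn.Module):
#   compiled = torch.compile(my_model)
#   -> the isinstance test is True for `compiled` and False for the plain `my_model`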
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**kwargs : Tuple ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
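# Sketch of the intended usage of the context manager above (hypothetical values): inside the
# `with` block the upper-cased keys are visible through os.environ and are removed on exit.
#   with <context manager>(master_addr="localhost", master_port="29500"):
#       ...  # os.environ["MASTER_ADDR"] == "localhost" in here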
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def merge_dicts (source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
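# Illustrative example for merge_dicts (hypothetical values): nested dictionaries are merged
# recursively, while scalar values simply overwrite the destination entry.
#   merge_dicts({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}})
#   -> {"a": {"y": 3, "x": 1}, "b": 2}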
def lowerCamelCase (port : int = None ):
    if port is None:
        port = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
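# The helper above reports whether something is already listening on the given local port
# (socket.connect_ex returns 0 on a successful connection); 29_500 is the port commonly used
# as the default master port for distributed PyTorch training.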
| 294
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : List[Any] , a : int , a : int , a : float , **a : str ):
"""simple docstring"""
__lowerCamelCase = feature_size
__lowerCamelCase = sampling_rate
__lowerCamelCase = padding_value
__lowerCamelCase = kwargs.pop('''padding_side''' , '''right''' )
__lowerCamelCase = kwargs.pop('''return_attention_mask''' , a )
super().__init__(**a )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , a : Union[bool, str, PaddingStrategy] = True , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[Union[str, TensorType]] = None , ):
"""simple docstring"""
if isinstance(a , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowerCamelCase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
__lowerCamelCase = processed_features[self.model_input_names[0]]
__lowerCamelCase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(a ) == 0:
if return_attention_mask:
__lowerCamelCase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowerCamelCase = required_input[0]
if isinstance(a , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__lowerCamelCase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(a ):
__lowerCamelCase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(a ):
__lowerCamelCase = '''tf'''
elif is_torch_tensor(a ):
__lowerCamelCase = '''pt'''
elif isinstance(a , (int, float, list, tuple, np.ndarray) ):
__lowerCamelCase = '''np'''
else:
raise ValueError(
f"""type of {first_element} unknown: {type(a )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowerCamelCase = to_numpy(a )
else:
__lowerCamelCase = [to_numpy(a ) for v in value]
        # Convert padding_strategy into a PaddingStrategy
__lowerCamelCase = self._get_padding_strategies(padding=a , max_length=a )
__lowerCamelCase = processed_features[self.model_input_names[0]]
__lowerCamelCase = len(a )
if not all(len(a ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
__lowerCamelCase = []
for i in range(a ):
__lowerCamelCase = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowerCamelCase = self._truncate(
a , max_length=a , pad_to_multiple_of=a , truncation=a , )
truncated_inputs.append(a )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowerCamelCase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowerCamelCase = PaddingStrategy.MAX_LENGTH
__lowerCamelCase = {}
for i in range(a ):
# padding
__lowerCamelCase = self._pad(
truncated_inputs[i] , max_length=a , padding_strategy=a , pad_to_multiple_of=a , return_attention_mask=a , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowerCamelCase = []
if value.dtype is np.dtype(np.floataa ):
__lowerCamelCase = value.astype(np.floataa )
batch_outputs[key].append(a )
return BatchFeature(a , tensor_type=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Union[Dict[str, np.ndarray], BatchFeature] , a : Optional[int] = None , a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , a : Optional[int] = None , a : Optional[bool] = None , ):
"""simple docstring"""
__lowerCamelCase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowerCamelCase = len(a )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(a ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowerCamelCase = np.ones(len(a ) , dtype=np.intaa )
if needs_to_be_padded:
__lowerCamelCase = max_length - len(a )
if self.padding_side == "right":
if return_attention_mask:
__lowerCamelCase = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
__lowerCamelCase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowerCamelCase = np.pad(
a , a , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowerCamelCase = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
__lowerCamelCase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowerCamelCase = np.pad(
a , a , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE__ ( self : int , a : Union[Dict[str, np.ndarray], BatchFeature] , a : Optional[int] = None , a : Optional[int] = None , a : Optional[bool] = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
__lowerCamelCase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase = len(a ) > max_length
if needs_to_be_truncated:
__lowerCamelCase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowerCamelCase = processed_features['''attention_mask'''][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Any=False , a : Any=None ):
"""simple docstring"""
if padding is not False:
if padding is True:
__lowerCamelCase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(a , a ):
__lowerCamelCase = PaddingStrategy(a )
elif isinstance(a , a ):
__lowerCamelCase = padding
else:
__lowerCamelCase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
| 67
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _A ( _a ):
"""simple docstring"""
def __snake_case ( self : Union[str, Any]):
a : int = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "tf_padding"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "depth_multiplier"))
class _A :
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]=13 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : Dict=0.25 , __UpperCAmelCase : int=8 , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[int]=1024 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : Union[str, Any]="relu6" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=10 , __UpperCAmelCase : List[Any]=None , ):
a : Any = parent
a : int = batch_size
a : Any = num_channels
a : Optional[Any] = image_size
a : int = depth_multiplier
a : Tuple = min_depth
a : Any = tf_padding
a : Union[str, Any] = int(last_hidden_size * depth_multiplier)
a : Optional[int] = output_stride
a : int = hidden_act
a : Dict = classifier_dropout_prob
a : str = use_labels
a : List[Any] = is_training
a : str = num_labels
a : Optional[int] = initializer_range
a : Tuple = scope
def __snake_case ( self : int):
a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : Any = None
a : Tuple = None
if self.use_labels:
a : List[Any] = ids_tensor([self.batch_size] , self.num_labels)
a : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self : Any):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __snake_case ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict):
a : List[Any] = MobileNetVaModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : Dict = model(__UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple):
a : Any = self.num_labels
a : Union[str, Any] = MobileNetVaForImageClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : Union[str, Any]):
a : List[Any] = self.prepare_config_and_inputs()
a , a , a , a : Tuple = config_and_inputs
a : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _A ( _a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[str] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCAmelCase : int = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Dict = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[int] = False
def __snake_case ( self : Any):
a : int = MobileNetVaModelTester(self)
a : List[str] = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase)
def __snake_case ( self : List[Any]):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
def __snake_case ( self : Optional[int]):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
def __snake_case ( self : str):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions")
def __snake_case ( self : Union[str, Any]):
pass
def __snake_case ( self : int):
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[Any] = model_class(__UpperCAmelCase)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Any = [*signature.parameters.keys()]
a : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def __snake_case ( self : int):
def check_hidden_states_output(__UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]):
a : List[str] = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a : Any = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a : Optional[int] = outputs.hidden_states
a : Any = 26
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
a , a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : str = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[int] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def __snake_case ( self : Optional[Any]):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = MobileNetVaModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def lowercase ( )-> Tuple:
'''simple docstring'''
a : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : int):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
)
@slow
def __snake_case ( self : Dict):
a : Any = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(__UpperCAmelCase)
a : Union[str, Any] = self.default_image_processor
a : Optional[int] = prepare_img()
a : List[Any] = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a : Union[str, Any] = model(**__UpperCAmelCase)
# verify the logits
a : Tuple = torch.Size((1, 1001))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
| 226
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
    # not used in modeling files, but it's important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (even though we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (even though it is not used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
    # `tokenizer_class` gets the default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase ( A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
a : List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
a : str = True
# Deal with multi-line cases
elif (
re.search(
RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , A_ , )
is not None
):
a : List[Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a : str = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a : Tuple = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
a : str = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
a : int = True
if not attribute_used:
a : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a : Optional[int] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a : List[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a : str = True
elif attribute.endswith("_token_id" ):
a : str = True
# configuration class specific cases
if not case_allowed:
a : Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
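# Illustrative example of the patterns matched above (hypothetical attribute name): for an
# attribute called "hidden_size", any of the following occurrences in a modeling_*.py file
# counts as the attribute being used:
#   config.hidden_size
#   getattr(config, "hidden_size", ...)
#   getattr(self.config, "hidden_size", ...)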
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
a : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
a : Any = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
a : str = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a : Dict = {}
if len(config_class.attribute_map ) > 0:
a : int = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a : int = inspect.getsourcefile(A_ )
a : Union[str, Any] = os.path.dirname(A_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a : Optional[Any] = [os.path.join(A_ , A_ ) for fn in os.listdir(A_ ) if fn.startswith("modeling_" )]
# Get the source code strings
a : Tuple = []
for path in modeling_paths:
if os.path.isfile(A_ ):
with open(A_ ) as fp:
modeling_sources.append(fp.read() )
a : Optional[Any] = []
for config_param, default_value in zip(A_ , A_ ):
# `attributes` here is all the variant names for `config_param`
a : str = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A_ , A_ , A_ , A_ ):
unused_attributes.append(attributes[0] )
return sorted(A_ )
def lowercase ( )-> str:
'''simple docstring'''
a : List[Any] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a : str = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A_ : inspect.isclass(A_ )
and issubclass(A_ , A_ )
and inspect.getmodule(A_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a : Union[str, Any] = check_config_attributes_being_used(A_ )
if len(A_ ) > 0:
a : Dict = unused_attributes
if len(A_ ) > 0:
a : Union[str, Any] = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(A_ )
if __name__ == "__main__":
check_config_attributes()
| 226
| 1
|
def SCREAMING_SNAKE_CASE ( string , separator = " " ) -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
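# Illustrative examples for the splitter above (worked by hand, not from the original file):
#   SCREAMING_SNAKE_CASE("apple#banana#cherry", "#") -> ['apple', 'banana', 'cherry']
#   SCREAMING_SNAKE_CASE("Hello World")              -> ['Hello', 'World']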
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : int = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : int = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
_UpperCAmelCase : Any = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = RealmTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Tuple="[PAD]" , UpperCAmelCase : List[Any]="[CLS]" , UpperCAmelCase : Union[str, Any]="[MASK]" , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Optional[int] , ) -> str:
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ : Optional[int] = getattr(UpperCAmelCase , normalizer_state.pop('type' ) )
lowerCamelCase__ : Optional[Any] = do_lower_case
lowerCamelCase__ : str = strip_accents
lowerCamelCase__ : Optional[Any] = tokenize_chinese_chars
lowerCamelCase__ : int = normalizer_class(**UpperCAmelCase )
lowerCamelCase__ : str = do_lower_case
def A_ ( self : Optional[int] , UpperCAmelCase : int , **UpperCAmelCase : int ) -> List[Any]:
lowerCamelCase__ : List[Any] = PaddingStrategy.MAX_LENGTH
lowerCamelCase__ : Optional[int] = text
lowerCamelCase__ : Dict = kwargs.pop('text_pair' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = kwargs.pop('return_tensors' , UpperCAmelCase )
lowerCamelCase__ : List[Any] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCAmelCase ):
if batch_text_pair is not None:
lowerCamelCase__ : Tuple = batch_text_pair[idx]
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Optional[int] = super().__call__(UpperCAmelCase , UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Any = encoded_candidates.get('input_ids' )
lowerCamelCase__ : Union[str, Any] = encoded_candidates.get('attention_mask' )
lowerCamelCase__ : Tuple = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCAmelCase )
lowerCamelCase__ : int = {key: item for key, item in output_data.items() if len(UpperCAmelCase ) != 0}
return BatchEncoding(UpperCAmelCase , tensor_type=UpperCAmelCase )
def A_ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=None ) -> List[str]:
lowerCamelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase__ : List[Any] = [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
lowerCamelCase__ : int = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 50
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = '''ClapFeatureExtractor'''
A : List[Any] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self, A, A ):
'''simple docstring'''
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('sampling_rate', A )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(A, return_tensors=A, **A )
if audios is not None:
SCREAMING_SNAKE_CASE : int = self.feature_extractor(
A, sampling_rate=A, return_tensors=A, **A )
if text is not None and audios is not None:
SCREAMING_SNAKE_CASE : List[Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ), tensor_type=A )
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A, **A )
def UpperCamelCase_ ( self, *A, **A ):
'''simple docstring'''
return self.tokenizer.decode(*A, **A )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 353
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase_ = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
], )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(A, A, sample_weight=A ) ),
}
| 246
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 303
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a__ ( dataset_path ):
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('''://''' )[1]
    return dataset_path
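# Illustrative examples (hypothetical paths): the protocol prefix is stripped from remote URIs
# while plain local paths are returned unchanged.
#   a__("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train"
#   a__("/local/path/to/dataset")        -> "/local/path/to/dataset"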
def a__ ( snake_case ):
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = not is_remote_filesystem(snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(snake_case ) , fs._strip_protocol(snake_case ) )
else:
fs.mv(snake_case , snake_case , recursive=snake_case )
def a__ ( ):
"""simple docstring"""
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = threading.Lock()
| 303
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a__ : int , a__ : Union[str, Any]=14 , a__ : str=7 , a__ : Union[str, Any]=True , a__ : str=True , a__ : Tuple=True , a__ : Any=True , a__ : List[Any]=True , a__ : Optional[Any]=99 , a__ : Union[str, Any]=32 , a__ : List[str]=5 , a__ : Union[str, Any]=4 , a__ : str=37 , a__ : Tuple="gelu" , a__ : int=0.1 , a__ : int=0.1 , a__ : Tuple=512 , a__ : List[Any]=16 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Any=3 , a__ : Dict=4 , a__ : Any=None , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_input_mask
__magic_name__ = use_labels
__magic_name__ = use_mc_token_ids
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def snake_case__ ( self : Dict ):
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
if self.use_mc_token_ids:
__magic_name__ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self : str ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def snake_case__ ( self : Dict , a__ : str , a__ : Dict , a__ : Any , a__ : str , a__ : Dict , *a__ : Union[str, Any] ):
__magic_name__ = CTRLModel(config=a__ )
model.to(a__ )
model.eval()
model(a__ , token_type_ids=a__ , head_mask=a__ )
model(a__ , token_type_ids=a__ )
__magic_name__ = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def snake_case__ ( self : List[str] , a__ : Dict , a__ : Union[str, Any] , a__ : List[str] , a__ : List[str] , a__ : Optional[Any] , *a__ : int ):
__magic_name__ = CTRLLMHeadModel(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__magic_name__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def snake_case__ ( self : Optional[Any] , a__ : Optional[Any] , a__ : str , a__ : Tuple , a__ : int , *a__ : List[Any] ):
__magic_name__ = self.num_labels
__magic_name__ = CTRLForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :List[Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE :Tuple = (CTRLLMHeadModel,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE :Optional[Any] = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE :List[Any] = True
__SCREAMING_SNAKE_CASE :List[Any] = False
__SCREAMING_SNAKE_CASE :Dict = False
def snake_case__ ( self : str , a__ : Optional[int] , a__ : List[str] , a__ : Any , a__ : Optional[int] , a__ : List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def snake_case__ ( self : Optional[int] ):
__magic_name__ = CTRLModelTester(self )
__magic_name__ = ConfigTester(self , config_class=a__ , n_embd=37 )
def snake_case__ ( self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : int ):
self.config_tester.run_common_tests()
def snake_case__ ( self : str ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*a__ )
def snake_case__ ( self : Dict ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : Tuple ):
pass
@slow
def snake_case__ ( self : List[str] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = CTRLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self : str ):
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : List[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def snake_case__ ( self : List[Any] ):
__magic_name__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(a__ )
__magic_name__ = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=a__ ) # Legal the president is
__magic_name__ = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__magic_name__ = model.generate(a__ , do_sample=a__ )
self.assertListEqual(output_ids[0].tolist() , a__ )
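# --- Hedged usage sketch (editorial addition) ---
# The slow test above greedily continues the prompt "Legal the president is" with the
# pretrained "ctrl" checkpoint and checks the exact token ids. The same call pattern
# outside a test (checkpoint download and tokenizer availability are assumed) would be:
#
#     from transformers import CTRLTokenizer, CTRLLMHeadModel
#
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     model = CTRLLMHeadModel.from_pretrained("ctrl")
#     inputs = tokenizer("Legal the president is", return_tensors="pt")
#     output_ids = model.generate(**inputs, do_sample=False, max_length=20)
#     print(tokenizer.decode(output_ids[0]))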
| 354
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _SCREAMING_SNAKE_CASE ( __a ,__a ):
__SCREAMING_SNAKE_CASE :Optional[Any] = 1
@register_to_config
def __init__( self : Optional[int] , a__ : int = 1000 , a__ : Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(a__ )
# standard deviation of the initial noise distribution
__magic_name__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__magic_name__ = 4
# running values
__magic_name__ = []
def snake_case__ ( self : Dict , a__ : int , a__ : Union[str, torch.device] = None ):
__magic_name__ = num_inference_steps
__magic_name__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__magic_name__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__magic_name__ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__magic_name__ = torch.sin(steps * math.pi / 2 ) ** 2
__magic_name__ = (1.0 - self.betas**2) ** 0.5
__magic_name__ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__magic_name__ = timesteps.to(a__ )
__magic_name__ = []
def snake_case__ ( self : Union[str, Any] , a__ : torch.FloatTensor , a__ : int , a__ : torch.FloatTensor , a__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
__magic_name__ = (self.timesteps == timestep).nonzero().item()
__magic_name__ = timestep_index + 1
__magic_name__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(a__ )
if len(self.ets ) == 1:
__magic_name__ = self.ets[-1]
elif len(self.ets ) == 2:
__magic_name__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__magic_name__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__magic_name__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__magic_name__ = self._get_prev_sample(a__ , a__ , a__ , a__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a__ )
def snake_case__ ( self : Any , a__ : torch.FloatTensor , *a__ : Optional[int] , **a__ : Union[str, Any] ):
return sample
def snake_case__ ( self : str , a__ : Optional[int] , a__ : int , a__ : List[Any] , a__ : List[str] ):
__magic_name__ = self.alphas[timestep_index]
__magic_name__ = self.betas[timestep_index]
__magic_name__ = self.alphas[prev_timestep_index]
__magic_name__ = self.betas[prev_timestep_index]
__magic_name__ = (sample - sigma * ets) / max(a__ , 1E-8 )
__magic_name__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
return self.config.num_train_timesteps
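# --- Hedged usage sketch (editorial addition) ---
# The scheduler above implements a linear multistep update: `set_timesteps` builds a
# sinusoidal beta/alpha schedule, `step` keeps the last few combined outputs in
# `self.ets` and mixes them with multistep coefficients (1, 2, 3, then 4 terms)
# before `_get_prev_sample` produces the previous sample. A schematic sampling loop,
# with `model` standing in for any noise-prediction network (names are illustrative,
# not library API), might look like this:
#
#     import torch
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)   # hypothetical class name
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)                    # hypothetical denoiser call
#         sample = scheduler.step(model_output, t, sample).prev_sample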
| 98
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ = logging.get_logger(__name__)
class A ( a_ , a_ ):
__UpperCAmelCase : Dict = 'maskformer-swin'
__UpperCAmelCase : str = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self : List[Any] , __UpperCAmelCase : Any=2_2_4 , __UpperCAmelCase : int=4 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[Any]=9_6 , __UpperCAmelCase : List[str]=[2, 2, 6, 2] , __UpperCAmelCase : str=[3, 6, 1_2, 2_4] , __UpperCAmelCase : Any=7 , __UpperCAmelCase : List[Any]=4.0 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Tuple=1E-5 , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , **__UpperCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = len(__UpperCAmelCase )
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase__ = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
UpperCAmelCase__ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
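# --- Hedged usage sketch (editorial addition) ---
# The configuration above describes a Swin-style backbone: `depths` and `num_heads` are
# per-stage lists, `hidden_size` is derived as embed_dim * 2**(num_stages - 1) so the
# backbone exposes a single channel dimension, and `stage_names`/`out_features` feed the
# BackboneConfigMixin. Assuming this corresponds to transformers' MaskFormerSwinConfig,
# instantiating it with the defaults shown in __init__ looks roughly like:
#
#     config = MaskFormerSwinConfig(
#         image_size=224,
#         patch_size=4,
#         embed_dim=96,
#         depths=[2, 2, 6, 2],
#         num_heads=[3, 6, 12, 24],
#         out_features=["stage1", "stage4"],
#     )
#     print(config.hidden_size)   # 96 * 2**3 = 768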
| 65
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["image_processor", "tokenizer"]
_UpperCAmelCase = "LayoutLMv2ImageProcessor"
_UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int:
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
snake_case__ = kwargs.pop('feature_extractor' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case__ = features['words']
snake_case__ = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
# add pixel values
snake_case__ = features.pop('pixel_values' )
if return_overflowing_tokens is True:
snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] )
snake_case__ = images
return encoded_inputs
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' )
return images_with_overflow
def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]:
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCAmelCase_ ( self: str ) -> List[Any]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCAmelCase_ ( self: Any ) -> List[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , )
return self.image_processor
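# --- Hedged usage sketch (editorial addition) ---
# The processor above chains a LayoutLMv2 image processor (OCR and pixel values) with a
# LayoutXLM tokenizer, so a single call on a document image returns input_ids, bbox,
# attention_mask and image tensors. Assuming the public LayoutXLMProcessor and checkpoint
# name below (and pytesseract installed for the default OCR), usage looks roughly like:
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")      # hypothetical input file
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())   # includes input_ids, bbox, attention_mask, image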
| 307
| 0
|
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy )-> None:
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'''{len(dest_layers )} != {len(layers_to_copy )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy( n_student , n_teacher )-> List[int]:
    '''simple docstring'''
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                F''' {n_student}''' )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher )-> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
        return list(range(n_teacher ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers( teacher , save_path = "student" , e = None , d = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , )-> Tuple[PreTrainedModel, List[int], List[int]]:
    '''simple docstring'''
    _msg = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher , str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path )  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher , PreTrainedModel ), F'''teacher must be a model or string got type {type(teacher )}'''
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , """num_encoder_layers""" ):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config , """num_encoder_layers""" ):
            init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
        else:
            init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
            F''' {save_path}''' )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d , teacher_d )
    try:
        if hasattr(
            teacher , """prophetnet""" ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
    logger.info(
        F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
    # metadata about which layers were copied, kept for reproducibility
    init_metadata = {
        """teacher_type""": teacher.config.model_type,
        """copied_encoder_layers""": e_layers_to_copy,
        """copied_decoder_layers""": d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
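# --- Hedged usage sketch (editorial addition) ---
# fire.Fire exposes create_student_by_copying_alternating_layers on the command line, so
# a smaller student can be initialised from a teacher checkpoint before distillation.
# An illustrative invocation (script name, checkpoint and layer counts are examples only):
#
#     python make_student.py facebook/bart-large-cnn \
#         --save_path student_bart_12_3 \
#         --e 12 --d 3
#
# which would save a student with 12 copied encoder layers and 3 copied decoder layers.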
| 349
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted( self ) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
    def test_negative_max_weight( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_negative_weight_value( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""Weight can not be negative.""" )
    def test_negative_profit_value( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""Profit can not be negative.""" )
    def test_null_max_weight( self ) -> None:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_unequal_list_length( self ) -> None:
        self.assertRaisesRegex(
            ValueError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
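# --- Hedged usage sketch (editorial addition) ---
# The suite above exercises greedy_knapsack.calc_profit, which returns the profit a
# greedy, ratio-based strategy achieves for the given capacity. Called directly with the
# same fixture data it behaves like this:
#
#     from knapsack import greedy_knapsack as kp
#
#     profit = [10, 20, 30, 40, 50, 60]
#     weight = [2, 4, 6, 8, 10, 12]
#     print(kp.calc_profit(profit, weight, 100))   # 210, as asserted in the first test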
| 349
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _A:
"""simple docstring"""
def __init__( self , _A , ):
__A : List[str] = parent
__A : Dict = 13
__A : Optional[int] = 7
__A : str = True
__A : Any = True
__A : Dict = True
__A : str = True
__A : str = True
__A : Optional[int] = False
__A : Any = False
__A : Any = False
__A : Union[str, Any] = 2
__A : Tuple = 99
__A : Optional[int] = 0
__A : Tuple = 32
__A : Dict = 2
__A : Union[str, Any] = 4
__A : List[str] = 0.1
__A : Dict = 0.1
__A : List[Any] = 512
__A : str = 16
__A : int = 2
__A : Optional[int] = 0.0_2
__A : List[str] = 3
__A : str = 4
__A : Optional[Any] = 'last'
__A : Union[str, Any] = True
__A : Dict = None
__A : Union[str, Any] = 0
def UpperCAmelCase_ ( self ):
__A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : str = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__A : Optional[Any] = None
if self.use_input_lengths:
__A : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__A : List[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__A : Optional[Any] = None
__A : Union[str, Any] = None
__A : Tuple = None
if self.use_labels:
__A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[str] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__A : str = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : str = TFFlaubertModel(config=_A )
__A : int = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__A : Optional[Any] = model(_A )
__A : Tuple = [input_ids, input_mask]
__A : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = TFFlaubertWithLMHeadModel(_A )
__A : Union[str, Any] = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__A : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : List[str] = TFFlaubertForQuestionAnsweringSimple(_A )
__A : Optional[int] = {'input_ids': input_ids, 'lengths': input_lengths}
__A : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Union[str, Any] = TFFlaubertForSequenceClassification(_A )
__A : Optional[Any] = {'input_ids': input_ids, 'lengths': input_lengths}
__A : Tuple = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Union[str, Any] = self.num_labels
__A : Union[str, Any] = TFFlaubertForTokenClassification(config=_A )
__A : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Any = self.num_choices
__A : Optional[Any] = TFFlaubertForMultipleChoice(config=_A )
__A : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__A : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__A : List[Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__A : List[str] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
__A : Optional[Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class _A( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase : int = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase : int = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : List[Any] = False
UpperCamelCase : Dict = False
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase_ ( self ):
__A : Tuple = TFFlaubertModelTester(self )
__A : str = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCAmelCase_ ( self ):
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_A )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Optional[Any] = TFFlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_tf
@require_sentencepiece
@require_tokenizers
class _A( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__A : List[Any] = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__A : Tuple = model(_A )[0]
__A : List[Any] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , _A )
# compare the actual values for a slice.
__A : int = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 280
|
from __future__ import annotations
def min_path_sum( matrix: list[list[int]] ) -> int:
    """simple docstring"""
    # preprocessing the first row
    for i in range(1 ,len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 ,len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 ,len(matrix ) ):
        for j in range(1 ,len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
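# --- Hedged usage sketch (editorial addition) ---
# The function above computes the cheapest top-left to bottom-right path when only moves
# to the right and down are allowed, accumulating costs in place. For example:
#
#     grid = [
#         [1, 3, 1],
#         [1, 5, 1],
#         [4, 2, 1],
#     ]
#     print(min_path_sum(grid))   # 7, via the path 1 -> 3 -> 1 -> 1 -> 1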
| 312
| 0
|
def counting_sort( collection ):
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
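# --- Hedged usage sketch (editorial addition) ---
# Counting sort is O(n + k) for n values spanning a range of size k: it histograms the
# values, converts the histogram to prefix sums, then scans the input backwards so equal
# elements keep their order (a stable sort). For example:
#
#     print(counting_sort([4, 1, 3, 1, 2]))            # [1, 1, 2, 3, 4]
#     print(counting_sort_string("thisisthestring"))   # "eghhiiinrsssttt"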
| 35
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowercase = get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__( self : Dict , _a : Optional[str] = None ):
UpperCamelCase__ = (
os.path.join(_a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCamelCase__ = Extractor
def A_ ( self : str , _a : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
UpperCamelCase__ = os.path.abspath(_a )
return os.path.join(self.extract_dir , hash_url_to_filename(_a ) )
def A_ ( self : Optional[Any] , _a : str , _a : bool ):
return force_extract or (
not os.path.isfile(_a ) and not (os.path.isdir(_a ) and os.listdir(_a ))
)
def A_ ( self : int , _a : str , _a : bool = False ):
UpperCamelCase__ = self.extractor.infer_extractor_format(_a )
if not extractor_format:
return input_path
UpperCamelCase__ = self._get_output_path(_a )
if self._do_extract(_a , _a ):
self.extractor.extract(_a , _a , _a )
return output_path
class __lowercase ( A ):
'''simple docstring'''
@classmethod
@abstractmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , **_a : List[str] ):
...
@staticmethod
@abstractmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
...
class __lowercase ( A, A ):
'''simple docstring'''
_A : List[bytes] = []
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
with open(_a , '''rb''' ) as f:
return f.read(_a )
@classmethod
def A_ ( cls : str , _a : Union[Path, str] , _a : bytes = b"" ):
if not magic_number:
UpperCamelCase__ = max(len(_a ) for cls_magic_number in cls.magic_numbers )
try:
UpperCamelCase__ = cls.read_magic_number(_a , _a )
except OSError:
return False
return any(magic_number.startswith(_a ) for cls_magic_number in cls.magic_numbers )
class __lowercase ( A ):
'''simple docstring'''
@classmethod
def A_ ( cls : Union[str, Any] , _a : Union[Path, str] , **_a : Any ):
return tarfile.is_tarfile(_a )
@staticmethod
def A_ ( _a : int , _a : List[str] ):
def resolved(_a : str ) -> str:
return os.path.realpath(os.path.abspath(_a ) )
def badpath(_a : str , _a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_a , _a ) ).startswith(_a )
def badlink(_a : Tuple , _a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCamelCase__ = resolved(os.path.join(_a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_a )
UpperCamelCase__ = resolved(_a )
for finfo in members:
if badpath(finfo.name , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = tarfile.open(_a )
tar_file.extractall(_a , members=TarExtractor.safemembers(_a , _a ) )
tar_file.close()
class __lowercase ( A ):
'''simple docstring'''
_A : int = [b'''\x1F\x8B''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with gzip.open(_a , '''rb''' ) as gzip_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : int = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def A_ ( cls : Dict , _a : Union[Path, str] , _a : bytes = b"" ):
if super().is_extractable(_a , magic_number=_a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_a , '''rb''' ) as fp:
UpperCamelCase__ = _EndRecData(_a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCamelCase__ = fp.read(_a ) # CD is where we expect it to be
if len(_a ) == sizeCentralDir:
UpperCamelCase__ = struct.unpack(_a , _a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
with zipfile.ZipFile(_a , '''r''' ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
class __lowercase ( A ):
'''simple docstring'''
_A : Tuple = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with lzma.open(_a ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Union[str, Any] = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = rarfile.RarFile(_a )
rf.extractall(_a )
rf.close()
class __lowercase ( A ):
'''simple docstring'''
_A : Optional[Any] = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
UpperCamelCase__ = zstd.ZstdDecompressor()
with open(_a , '''rb''' ) as ifh, open(_a , '''wb''' ) as ofh:
dctx.copy_stream(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Any = [b'''\x42\x5A\x68''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with bza.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Optional[int] = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(_a , exist_ok=_a )
with pyazr.SevenZipFile(_a , '''r''' ) as archive:
archive.extractall(_a )
class __lowercase ( A ):
'''simple docstring'''
_A : Union[str, Any] = [b'''\x04\x22\x4D\x18''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase :
'''simple docstring'''
_A : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A_ ( cls : Dict ):
return max(
len(_a )
for extractor in cls.extractors.values()
if issubclass(_a , _a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(_a , magic_number_length=_a )
except OSError:
return b""
@classmethod
def A_ ( cls : Optional[Any] , _a : Union[Path, str] , _a : bool = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = cls.infer_extractor_format(_a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A_ ( cls : str , _a : Union[Path, str] ): # <Added version="2.4.0"/>
UpperCamelCase__ = cls._get_magic_number_max_length()
UpperCamelCase__ = cls._read_magic_number(_a , _a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_a , magic_number=_a ):
return extractor_format
@classmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , _a : Union[Path, str] , _a : Optional[str] = None , _a : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(_a ) , exist_ok=_a )
# Prevent parallel extractions
UpperCamelCase__ = str(Path(_a ).with_suffix('''.lock''' ) )
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_a , _a ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
UpperCamelCase__ = cls.extractors[extractor_format]
return extractor.extract(_a , _a )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_a ):
return extractor.extract(_a , _a )
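# --- Hedged usage sketch (editorial addition) ---
# The registry above maps format names ("tar", "gzip", "zip", ...) to extractor classes
# that recognise an archive by its magic number and unpack it into a cache directory,
# guarded by a FileLock. Assuming this mirrors `datasets.utils.extract.ExtractManager`
# (names may differ between library versions), typical usage looks like:
#
#     from datasets.utils.extract import ExtractManager
#
#     manager = ExtractManager(cache_dir="/tmp/extracted")   # hypothetical cache dir
#     output_dir = manager.extract("archive.tar.gz")         # format inferred from magic number
#     print(output_dir)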
| 35
| 1
|
'''simple docstring'''
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ) -> bool:
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph , source , sink ) -> list:
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
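# --- Hedged usage sketch (editorial addition) ---
# mincut runs Edmonds-Karp style augmentation: BFS finds a shortest augmenting path, the
# bottleneck capacity is pushed along it, and this repeats until the sink is unreachable.
# The edges whose residual capacity dropped to zero (compared against the saved `temp`
# copy) form a minimum cut; for test_graph above their capacities sum to the maximum
# flow, 23. Note that the graph is modified in place, so pass a copy if you still need
# the original capacities afterwards:
#
#     import copy
#
#     cut_edges = mincut(copy.deepcopy(test_graph), source=0, sink=5)
#     print(cut_edges)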
| 23
|
"""simple docstring"""
class DisjointSet:
    '''simple docstring'''
    def __init__( self, set_counts: list )-> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self, src: int, dst: int )-> bool:
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size )
        return True
    def get_parent( self, disj_set: int )-> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point directly at the root found by the recursive call
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
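# --- Hedged usage sketch (editorial addition) ---
# The structure above is a union-find over sets that also tracks each set's size: merge
# uses union by rank, get_parent compresses paths, and max_set records the largest set
# seen so far. For example:
#
#     ds = DisjointSet([1, 1, 1, 1])   # four singleton sets
#     ds.merge(0, 1)
#     ds.merge(2, 3)
#     ds.merge(0, 3)
#     print(ds.max_set)                # 4 once everything is joined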
| 238
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : int =logging.get_logger(__name__)
# General docstring
lowerCAmelCase : Any ='''RegNetConfig'''
# Base docstring
lowerCAmelCase : Optional[Any] ='''facebook/regnet-y-040'''
lowerCAmelCase : Optional[int] =[1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict ='''facebook/regnet-y-040'''
lowerCAmelCase : List[Any] ='''tabby, tabby cat'''
lowerCAmelCase : Optional[int] =[
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , lowercase : int , lowercase : int = 3 , lowercase : int = 1 , lowercase : int = 1 , lowercase : Optional[str] = "relu" , **lowercase : List[str] , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ :Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ :Optional[Any] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding="VALID" , groups=lowercase , use_bias=lowercase , name="convolution" , )
lowercase_ :Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowercase_ :Dict = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self : Any , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Tuple = self.convolution(self.padding(lowercase ) )
lowercase_ :Any = self.normalization(lowercase )
lowercase_ :Dict = self.activation(lowercase )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowercase : RegNetConfig , **lowercase : int ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :List[str] = config.num_channels
lowercase_ :str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowercase__ ( self : Dict , lowercase : int ):
"""simple docstring"""
lowercase_ :List[Any] = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ :Dict = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
lowercase_ :Optional[Any] = self.embedder(lowercase )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : str , lowercase : int , lowercase : int = 2 , **lowercase : List[str] ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Union[str, Any] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name="convolution" )
lowercase_ :Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def lowercase__ ( self : str , lowercase : tf.Tensor , lowercase : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowercase : int , lowercase : int , **lowercase : Union[str, Any] ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name="pooler" )
lowercase_ :Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowercase__ ( self : Dict , lowercase : Dict ):
"""simple docstring"""
lowercase_ :str = self.pooler(lowercase )
for layer_module in self.attention:
lowercase_ :Tuple = layer_module(lowercase )
lowercase_ :str = hidden_state * pooled
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : str , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 1 , **lowercase : Any ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :List[str] = in_channels != out_channels or stride != 1
lowercase_ :int = max(1 , out_channels // config.groups_width )
lowercase_ :List[str] = (
TFRegNetShortCut(lowercase , stride=lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ :str = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="layer.2" ),
]
lowercase_ :Any = ACTaFN[config.hidden_act]
def lowercase__ ( self : Any , lowercase : str ):
"""simple docstring"""
lowercase_ :int = hidden_state
for layer_module in self.layers:
lowercase_ :Optional[Any] = layer_module(lowercase )
lowercase_ :Dict = self.shortcut(lowercase )
hidden_state += residual
lowercase_ :Tuple = self.activation(lowercase )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 1 , **lowercase : str ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :List[Any] = in_channels != out_channels or stride != 1
lowercase_ :Dict = max(1 , out_channels // config.groups_width )
lowercase_ :Union[str, Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowercase_ :Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name="layer.3" ),
]
        lowercase_ :Dict = ACT2FN[config.hidden_act]
def lowercase__ ( self : int , lowercase : List[str] ):
"""simple docstring"""
lowercase_ :Optional[Any] = hidden_state
for layer_module in self.layers:
lowercase_ :Tuple = layer_module(lowercase )
lowercase_ :str = self.shortcut(lowercase )
hidden_state += residual
lowercase_ :Tuple = self.activation(lowercase )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowercase : RegNetConfig , lowercase : int , lowercase : int , lowercase : int = 2 , lowercase : int = 2 , **lowercase : Optional[Any] ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Union[str, Any] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowercase_ :Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name="layers.0" ),
*[layer(lowercase , lowercase , lowercase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowercase__ ( self : Union[str, Any] , lowercase : Dict ):
"""simple docstring"""
for layer_module in self.layers:
lowercase_ :Any = layer_module(lowercase )
return hidden_state
class a_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowercase : RegNetConfig , **lowercase : Optional[Any] ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Tuple = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowercase_ :Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'stages.{i+1}' ) )
def lowercase__ ( self : List[Any] , lowercase : tf.Tensor , lowercase : bool = False , lowercase : bool = True ):
"""simple docstring"""
lowercase_ :List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ :Any = hidden_states + (hidden_state,)
lowercase_ :List[Any] = stage_module(lowercase )
if output_hidden_states:
lowercase_ :Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class a_ ( tf.keras.layers.Layer ):
__A = RegNetConfig
def __init__( self : str , lowercase : Optional[int] , **lowercase : int ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Union[str, Any] = config
lowercase_ :List[Any] = TFRegNetEmbeddings(lowercase , name="embedder" )
lowercase_ :Optional[Any] = TFRegNetEncoder(lowercase , name="encoder" )
        lowercase_ :Dict = tf.keras.layers.GlobalAveragePooling2D(keepdims=lowercase , name="pooler" )
@unpack_inputs
def lowercase__ ( self : int , lowercase : tf.Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : bool = False , ):
"""simple docstring"""
lowercase_ :List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ :Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ :str = self.embedder(lowercase , training=lowercase )
lowercase_ :Tuple = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
lowercase_ :List[str] = encoder_outputs[0]
lowercase_ :Optional[Any] = self.pooler(lowercase )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ :List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
lowercase_ :Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ :Tuple = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class a_ ( _lowerCAmelCase ):
__A = RegNetConfig
__A = "regnet"
__A = "pixel_values"
@property
def lowercase__ ( self : Tuple ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str =r'''
    This model is a TensorFlow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase : List[Any] =r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
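# A minimal, hedged usage sketch for the bare model defined below (assumptions: the public
# "facebook/regnet-y-040" checkpoint and a PIL image already loaded as `image`):
#
#     from transformers import AutoImageProcessor, TFRegNetModel
#
#     image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
#     inputs = image_processor(image, return_tensors="tf")
#     outputs = model(**inputs)
#     outputs.last_hidden_state  # NCHW feature map, e.g. (1, 1088, 7, 7) for a 224x224 input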
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _lowerCAmelCase , )
class a_ ( _lowerCAmelCase ):
def __init__( self : Any , lowercase : RegNetConfig , *lowercase : Optional[int] , **lowercase : Any ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
lowercase_ :Optional[Any] = TFRegNetMainLayer(lowercase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self : Union[str, Any] , lowercase : tf.Tensor , lowercase : Optional[bool] = None , lowercase : Optional[bool] = None , lowercase : Union[str, Any]=False , ):
"""simple docstring"""
lowercase_ :List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ :List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ :Dict = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowerCAmelCase , )
class a_ ( _lowerCAmelCase , _lowerCAmelCase ):
def __init__( self : Optional[Any] , lowercase : RegNetConfig , *lowercase : List[Any] , **lowercase : Optional[Any] ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
lowercase_ :List[Any] = config.num_labels
lowercase_ :Any = TFRegNetMainLayer(lowercase , name="regnet" )
# classification head
lowercase_ :Optional[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self : Optional[int] , lowercase : tf.Tensor = None , lowercase : tf.Tensor = None , lowercase : bool = None , lowercase : bool = None , lowercase : List[str]=False , ):
"""simple docstring"""
lowercase_ :Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ :List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ :Any = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
lowercase_ :str = outputs.pooler_output if return_dict else outputs[1]
lowercase_ :Union[str, Any] = self.classifier[0](lowercase )
lowercase_ :Optional[Any] = self.classifier[1](lowercase )
lowercase_ :Union[str, Any] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
lowercase_ :Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
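# A short, hedged sketch of the classification head above (assumptions: the public
# "facebook/regnet-y-040" checkpoint, which ships ImageNet labels, and a PIL image `image`):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     import tensorflow as tf
#
#     image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     logits = model(**image_processor(image, return_tensors="tf")).logits
#     model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]  # predicted ImageNet class name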
| 147
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
__A = ["input_features"]
def __init__( self : Any , lowercase : Tuple=80 , lowercase : Optional[int]=16_000 , lowercase : Optional[Any]=160 , lowercase : Optional[int]=30 , lowercase : List[Any]=400 , lowercase : Dict=0.0 , lowercase : Tuple=False , **lowercase : Optional[int] , ):
"""simple docstring"""
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
lowercase_ :Optional[int] = n_fft
lowercase_ :List[Any] = hop_length
lowercase_ :Tuple = chunk_length
lowercase_ :List[str] = chunk_length * sampling_rate
lowercase_ :Optional[Any] = self.n_samples // hop_length
lowercase_ :Any = sampling_rate
lowercase_ :List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowercase , norm="slaney" , mel_scale="slaney" , )
def lowercase__ ( self : str , lowercase : np.array ):
"""simple docstring"""
lowercase_ :Any = spectrogram(
lowercase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
lowercase_ :Any = log_spec[:, :-1]
lowercase_ :List[Any] = np.maximum(lowercase , log_spec.max() - 8.0 )
lowercase_ :Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase__ ( lowercase : List[np.ndarray] , lowercase : List[np.ndarray] , lowercase : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
            lowercase_ :Optional[int] = np.array(lowercase , np.int32 )
lowercase_ :Any = []
for vector, length in zip(lowercase , attention_mask.sum(-1 ) ):
lowercase_ :Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowercase_ :List[Any] = padding_value
normed_input_values.append(lowercase )
else:
lowercase_ :List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Tuple , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : bool = True , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[bool] = None , lowercase : Optional[str] = "max_length" , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , **lowercase : Union[str, Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase_ :List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase_ :Optional[Any] = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            lowercase_ :Any = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(lowercase , np.ndarray ):
            lowercase_ :List[Any] = np.asarray(lowercase , dtype=np.float32 )
        elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float32 ):
            lowercase_ :Union[str, Any] = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase_ :Optional[int] = [np.asarray([raw_speech] ).T]
lowercase_ :int = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowercase_ :Tuple = self.pad(
lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase_ :Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowercase_ :List[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowercase_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowercase_ :List[str] = [self._np_extract_fbank_features(lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase ):
            lowercase_ :Tuple = [np.asarray(lowercase , dtype=np.float32 ) for feature in input_features]
else:
lowercase_ :Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase_ :Dict = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowercase_ :Tuple = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase_ :List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
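# A minimal usage sketch for the Whisper-style feature extractor above (assumptions: the class is
# instantiated with its defaults and the name `WhisperFeatureExtractor` is only illustrative):
#
#     import numpy as np
#
#     feature_extractor = WhisperFeatureExtractor()
#     waveform = np.zeros(16_000 * 5, dtype=np.float32)  # five seconds of silent 16 kHz audio as a stand-in
#     features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
#     features["input_features"].shape  # (1, feature_size, nb_max_frames), e.g. (1, 80, 3000) with the defaults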
| 147
| 1
|
def topological_sort(graph) -> None:
    """Kahn's algorithm: repeatedly remove zero-indegree vertices; any leftover vertex means a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    # count the incoming edges of every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # start from the vertices that have no incoming edges
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
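# Quick illustration of the cycle check (hypothetical graph, not part of the original example):
# topological_sort({0: [1], 1: [2], 2: [0]}) prints "Cycle exists", because every vertex in the loop
# keeps a non-zero indegree, nothing ever enters the queue, and cnt stays below len(graph).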
| 189
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_snake_case : int = logging.get_logger(__name__)
class a :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : str = None , lowerCamelCase : uuid.UUID = None , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None ) -> int:
if not conversation_id:
            __snake_case : Optional[Any] = uuid.uuid4()
if past_user_inputs is None:
__snake_case : List[Any] = []
if generated_responses is None:
__snake_case : Optional[int] = []
__snake_case : uuid.UUID = conversation_id
__snake_case : List[str] = past_user_inputs
__snake_case : List[str] = generated_responses
__snake_case : Optional[str] = text
def __eq__( self : int , lowerCamelCase : List[str] ) -> Dict:
if not isinstance(lowerCamelCase , lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __snake_case ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : bool = False ) -> Optional[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
                    F'User input added while unprocessed input was still pending: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__snake_case : Dict = text
else:
logger.warning(
                    F'User input added while unprocessed input was still pending: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__snake_case : List[str] = text
def __snake_case ( self : List[str] ) -> Dict:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__snake_case : Optional[Any] = None
def __snake_case ( self : List[str] , lowerCamelCase : str ) -> List[Any]:
self.generated_responses.append(lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Optional[Any] ) -> Dict:
__snake_case : Any = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__snake_case : List[Any] = "user" if is_user else "bot"
output += F'{name} >> {text} \n'
return output
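# A minimal sketch of how this conversation container is used upstream, where the methods above keep
# their original names (`add_user_input`, `mark_processed`, `append_response`):
#
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation.add_user_input("Is it an action movie?")  # warns: an unprocessed input already exists
#     conversation.mark_processed()
#     conversation.append_response("The Big Lebowski")
#     print(conversation)  # "Conversation id: ... user >> ... bot >> ..."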
@add_end_docstrings(
_lowerCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : List[str] ) -> Any:
super().__init__(*lowerCamelCase , **lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__snake_case : Dict = self.tokenizer.eos_token
def __snake_case ( self : Dict , lowerCamelCase : List[str]=None , lowerCamelCase : int=None , lowerCamelCase : Optional[Any]=None , **lowerCamelCase : Any ) -> Any:
__snake_case : Union[str, Any] = {}
__snake_case : Optional[int] = {}
__snake_case : Optional[Any] = {}
if min_length_for_response is not None:
__snake_case : int = min_length_for_response
if minimum_tokens is not None:
__snake_case : Tuple = minimum_tokens
if "max_length" in generate_kwargs:
__snake_case : Any = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__snake_case : Any = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , lowerCamelCase : Union[Conversation, List[Conversation]] , lowerCamelCase : Optional[Any]=0 , **lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
__snake_case : Optional[Any] = super().__call__(lowerCamelCase , num_workers=lowerCamelCase , **lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) == 1:
return outputs[0]
return outputs
def __snake_case ( self : Any , lowerCamelCase : Conversation , lowerCamelCase : Any=32 ) -> Dict[str, Any]:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
__snake_case : Tuple = self.tokenizer._build_conversation_input_ids(lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__snake_case : Tuple = self._legacy_parse_and_tokenize(lowerCamelCase )
if self.framework == "pt":
__snake_case : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__snake_case : Union[str, Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=10 , **lowerCamelCase : Tuple ) -> List[str]:
__snake_case : Tuple = generate_kwargs.get("max_length" , self.model.config.max_length )
__snake_case : List[str] = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__snake_case : Optional[int] = max_length - minimum_tokens
__snake_case : List[Any] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
__snake_case : str = model_inputs["attention_mask"][:, -trim:]
__snake_case : Any = model_inputs.pop("conversation" )
__snake_case : str = max_length
__snake_case : Optional[int] = self.model.generate(**lowerCamelCase , **lowerCamelCase )
if self.model.config.is_encoder_decoder:
__snake_case : List[Any] = 1
else:
__snake_case : Union[str, Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=True ) -> Any:
__snake_case : Optional[int] = model_outputs["output_ids"]
__snake_case : Optional[Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , )
__snake_case : Optional[int] = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowerCamelCase )
return conversation
def __snake_case ( self : Optional[Any] , lowerCamelCase : Conversation ) -> Dict:
__snake_case : Optional[Any] = self.tokenizer.eos_token_id
__snake_case : Any = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) )
if len(lowerCamelCase ) > self.tokenizer.model_max_length:
__snake_case : Tuple = input_ids[-self.tokenizer.model_max_length :]
return input_ids
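# A hedged end-to-end sketch of the pipeline above (assumptions: the upstream "conversational" task
# name and the public "microsoft/DialoGPT-small" checkpoint):
#
#     from transformers import pipeline, Conversation
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     conversation.generated_responses[-1]  # the model's reply as a string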
| 123
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=UpperCamelCase , )
assert hasattr(self , 'env' )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[str]=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def UpperCamelCase_ ( self : Any , UpperCamelCase : int ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_snake_case : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_snake_case : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_snake_case : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_snake_case : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCamelCase )
| 260
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =RobertaTokenizer
a_ : Tuple =RobertaTokenizerFast
a_ : Union[str, Any] =True
a_ : List[Any] ={"""cls_token""": """<s>"""}
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case : Optional[int] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : List[str] = {'unk_token': '<unk>'}
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : List[str] = 'lower newer'
_snake_case : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : Any = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase )
_snake_case : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : int = 'Encode this sequence.'
_snake_case : str = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Dict = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_snake_case : List[Any] = 'Encode <mask> sequence'
_snake_case : Any = 'Encode <mask>sequence'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : str = encoded.index(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = tokenizer.encode(UpperCamelCase )
_snake_case : Tuple = encoded.index(UpperCamelCase )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Tuple = 'A, <mask> AllenNLP sentence.'
_snake_case : str = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : List[str] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : Tuple = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : int = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : str = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Dict = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : str = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Any = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
| 260
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
UpperCamelCase : str = None
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : List[Any]="</s>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Union[str, Any]=False , **UpperCAmelCase__ : Any , ) -> Optional[Any]:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase__ ) != add_prefix_space:
_a : Optional[Any] = getattr(UpperCAmelCase__ , pre_tok_state.pop("""type""" ) )
_a : Optional[Any] = add_prefix_space
_a : Union[str, Any] = pre_tok_class(**UpperCAmelCase__ )
_a : Optional[Any] = add_prefix_space
def _lowercase ( self : int , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Tuple ) -> BatchEncoding:
_a : List[str] = kwargs.get("""is_split_into_words""" , UpperCAmelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
_a : List[Any] = kwargs.get("""is_split_into_words""" , UpperCAmelCase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
_a : Optional[int] = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def _lowercase ( self : str , UpperCAmelCase__ : "Conversation" ) -> List[int]:
_a : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] )
if len(UpperCAmelCase__ ) > self.model_max_length:
_a : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
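# A minimal usage sketch (assumption: the public "bigscience/bloom-560m" checkpoint listed in the map above):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello world")["input_ids"]  # token ids from the byte-level BPE vocabulary
#     tokenizer.decode(ids)                        # round-trips back to "Hello world"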
| 294
|
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a , mat_b , mat_c , pseudo_inv=None ):
    '''Computes the Schur complement S = C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]].'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        raise ValueError(
            """Expected the same number of rows for A and B. """
            F"""Instead found A of size {shape_a} and B of size {shape_b}""" )
    if shape_b[1] != shape_c[1]:
        raise ValueError(
            """Expected the same number of columns for B and C. """
            F"""Instead found B of size {shape_b} and C of size {shape_c}""" )
    # fall back to an explicit inverse of A when no pseudo-inverse is supplied
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
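# The tests below rely on the block-determinant identity that motivates the Schur complement:
# for M = [[A, B], [B^T, C]] with invertible A, det(M) = det(A) * det(C - B^T A^{-1} B) = det(A) * det(S).
# A tiny worked check (numbers chosen here for illustration, not taken from the original):
#
#     a = np.array([[2.0, 0.0], [0.0, 2.0]])
#     b = np.array([[1.0], [0.0]])
#     c = np.array([[3.0]])
#     s = schur_complement(a, b, c)                  # [[2.5]] since 3 - 1/2 = 2.5
#     np.linalg.det(np.block([[a, b], [b.T, c]]))    # 10.0 == det(a) * det(s) == 4 * 2.5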
class UpperCamelCase ( unittest.TestCase ):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
def _lowercase ( self : int ) -> None:
_a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> None:
_a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 294
| 1
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def a ( __a = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def a ( __a = "" ) -> bool:
'''simple docstring'''
if len(__a ) == 0:
return True
UpperCamelCase__ :List[Any] = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCamelCase__ :dict[str, int] = {}
for character in lower_case_input_str:
UpperCamelCase__ :Optional[int] = character_freq_dict.get(__a , 0 ) + 1
UpperCamelCase__ :List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a ( __a = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , __a , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
__snake_case = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
__snake_case = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 355
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case = logging.getLogger(__name__)
class lowercase ( A__ ):
"""simple docstring"""
_a = 'masked_bert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0 , UpperCamelCase_="topK" , UpperCamelCase_="constant" , UpperCamelCase_=0.0 , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Tuple = vocab_size
UpperCamelCase__ :Dict = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :List[Any] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :List[Any] = max_position_embeddings
UpperCamelCase__ :int = type_vocab_size
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :Any = layer_norm_eps
UpperCamelCase__ :str = pruning_method
UpperCamelCase__ :int = mask_init
UpperCamelCase__ :Optional[Any] = mask_scale
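# A small, hypothetical instantiation of the config above (in the upstream movement-pruning research
# example this class is called `MaskedBertConfig`; the values shown are simply the declared defaults):
#
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     config.model_type      # "masked_bert"
#     config.pruning_method  # "topK"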
| 219
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> List[Any]:
'''simple docstring'''
a__ : Any = size if size is not None else {'''height''': 18, '''width''': 18}
a__ : Dict = parent
a__ : Dict = batch_size
a__ : int = num_channels
a__ : Union[str, Any] = image_size
a__ : Optional[int] = min_resolution
a__ : List[str] = max_resolution
a__ : Dict = do_resize
a__ : Any = size
a__ : Any = do_normalize
a__ : int = image_mean
a__ : Tuple = image_std
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : List[str] = DPTImageProcessor if is_vision_available() else None
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[int] = DPTImageProcessingTester(self)
@property
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase_ , 'image_mean'))
self.assertTrue(hasattr(UpperCamelCase_ , 'image_std'))
self.assertTrue(hasattr(UpperCamelCase_ , 'do_normalize'))
self.assertTrue(hasattr(UpperCamelCase_ , 'do_resize'))
self.assertTrue(hasattr(UpperCamelCase_ , 'size'))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
a__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image)
# Test not batched input
a__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__ : Optional[Any] = image_processing(UpperCamelCase_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray)
# Test not batched input
a__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__ : Any = image_processing(UpperCamelCase_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor)
# Test not batched input
a__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a__ : str = image_processing(UpperCamelCase_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
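# A hedged usage sketch of the processor exercised by the tests above (assumptions: the public
# "Intel/dpt-large" checkpoint and a PIL image already loaded as `image`):
#
#     from transformers import DPTImageProcessor
#
#     image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#     pixel_values = image_processor(image, return_tensors="pt").pixel_values
#     pixel_values.shape  # (1, 3, height, width) after resizing, e.g. (1, 3, 384, 384)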
| 99
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCAmelCase_ ( unittest.TestCase ):
UpperCamelCase =MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase =TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : List[Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__lowercase : Union[str, Any] = text_generator('''This is a test''' , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__lowercase : Dict = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
UpperCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__lowercase : List[str] = text_generator('''This is a test''' , do_sample=UpperCamelCase_ , num_return_sequences=2 , return_tensors=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
] , )
__lowercase : str = text_generator.model.config.eos_token_id
__lowercase : Optional[int] = '''<pad>'''
__lowercase : Tuple = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=UpperCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase_ , )
self.assertEqual(
UpperCamelCase_ , [
[
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
],
[
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
{'''generated_token_ids''': ANY(UpperCamelCase_ )},
],
] , )
@require_tf
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__lowercase : Optional[int] = text_generator('''This is a test''' , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__lowercase : Any = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
__lowercase : Any = TextGenerationPipeline(model=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
return text_generator, ["This is a test", "Another test"]
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : str = '''Hello I believe in'''
__lowercase : int = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__lowercase : str = text_generator(UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__lowercase : List[Any] = text_generator(UpperCamelCase_ , stop_sequence=''' fe''' )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': '''Hello I believe in fe'''}] )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Tuple = text_generator.model
__lowercase : Optional[Any] = text_generator.tokenizer
__lowercase : List[Any] = text_generator('''This is a test''' )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__lowercase : Dict = text_generator('''This is a test''' , return_full_text=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__lowercase : Tuple = pipeline(task='''text-generation''' , model=UpperCamelCase_ , tokenizer=UpperCamelCase_ , return_full_text=UpperCamelCase_ )
__lowercase : List[Any] = text_generator('''This is a test''' )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__lowercase : int = text_generator('''This is a test''' , return_full_text=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__lowercase : Dict = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__lowercase : int = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
[{'''generated_text''': ANY(UpperCamelCase_ )}, {'''generated_text''': ANY(UpperCamelCase_ )}],
] , )
with self.assertRaises(UpperCamelCase_ ):
__lowercase : Tuple = text_generator('''test''' , return_full_text=UpperCamelCase_ , return_text=UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ ):
__lowercase : Union[str, Any] = text_generator('''test''' , return_full_text=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ ):
__lowercase : Optional[Any] = text_generator('''test''' , return_text=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__lowercase : str = text_generator('''''' )
self.assertEqual(UpperCamelCase_ , [{'''generated_text''': ANY(UpperCamelCase_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__lowercase : List[Any] = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__lowercase : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
__lowercase : List[Any] = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work when max_new_tokens exceeds the model's maximum length
with self.assertRaises(UpperCamelCase_ ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self ) -> Optional[Any]:
import torch
# Classic `model_kwargs`
__lowercase : Optional[Any] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowercase : Union[str, Any] = pipe('''This is a test''' )
self.assertEqual(
UpperCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
__lowercase : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowercase : int = pipe('''This is a test''' )
self.assertEqual(
UpperCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__lowercase : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__lowercase : Tuple = pipe('''This is a test''' )
self.assertEqual(
UpperCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def _lowerCamelCase ( self ) -> Optional[int]:
import torch
__lowercase : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self ) -> Dict:
import torch
__lowercase : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=UpperCamelCase_ , top_p=0.5 )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : int = '''Hello world'''
__lowercase : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__lowercase : Dict = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__lowercase : Any = logging.get_logger('''transformers.generation.utils''' )
__lowercase : List[str] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(UpperCamelCase_ ) as cl:
__lowercase : Dict = text_generator(UpperCamelCase_ , max_length=10 , max_new_tokens=1 )
self.assertIn(UpperCamelCase_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(UpperCamelCase_ ) as cl:
__lowercase : Optional[Any] = text_generator(UpperCamelCase_ , max_new_tokens=1 )
self.assertNotIn(UpperCamelCase_ , cl.out )
with CaptureLogger(UpperCamelCase_ ) as cl:
__lowercase : List[str] = text_generator(UpperCamelCase_ , max_length=10 )
self.assertNotIn(UpperCamelCase_ , cl.out )
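# A minimal, standalone sketch (not part of the test suite) of the pipeline usage the
# tests above exercise. It relies only on the `pipeline` factory already imported in
# this module; the tiny checkpoint and the `max_new_tokens` / `handle_long_generation`
# arguments are the same ones the tests verify.
if __name__ == "__main__":
    demo_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    # Cap the continuation length explicitly instead of relying on max_length.
    print(demo_generator("Hello I believe in", max_new_tokens=5))
    # For prompts longer than the model window, the "hole" strategy truncates the prompt
    # from the left so that max_new_tokens can still be generated.
    print(demo_generator("This is a test " * 500, handle_long_generation="hole", max_new_tokens=20))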
| 249
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=3 , lowerCamelCase_=30 , lowerCamelCase_=4_00 , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=0.9 , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=[0.5, 0.5, 0.5] , lowerCamelCase_=[0.5, 0.5, 0.5] , ) -> Tuple:
lowerCAmelCase__ = size if size is not None else {'''shortest_edge''': 30}
lowerCAmelCase__ = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize_and_center_crop
lowerCAmelCase__ = size
lowerCAmelCase__ = crop_pct
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean
lowerCAmelCase__ = image_std
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = PoolFormerImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = PoolFormerImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_std''' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase__ = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
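# A short usage sketch of the processor exercised above, assuming the public
# `PoolFormerImageProcessor` constructor accepts the same `size` / `crop_size` dicts the
# tests pass through `image_processor_dict`. The expected output shape mirrors the
# non-batched assertions above.
if __name__ == "__main__":
    from PIL import Image
    sketch_processor = PoolFormerImageProcessor(
        size={'shortest_edge': 30}, crop_size={'height': 30, 'width': 30}
    )
    sketch_image = Image.fromarray(np.uint8(np.random.rand(40, 50, 3) * 255))
    sketch_pixels = sketch_processor(sketch_image, return_tensors='pt').pixel_values
    print(sketch_pixels.shape)  # expected: torch.Size([1, 3, 30, 30])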
| 228
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> List[str]:
lowerCAmelCase__ = value
lowerCAmelCase__ = None # Added in order to make deleting a node easier
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> Union[str, Any]:
lowerCAmelCase__ = root
def __str__( self ) -> str:
return str(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if new_children is not None: # reset its kids
lowerCAmelCase__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCamelCase_ ): # If it is the right child
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
return self.root is None
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = Node(lowerCamelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ = new_node
break
else:
lowerCAmelCase__ = parent_node.right
lowerCAmelCase__ = parent_node
def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ ) -> None:
for value in values:
self.__insert(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
lowerCAmelCase__ = self.root
# use lazy evaluation here to avoid an AttributeError when node is None
while node is not None and node.value is not value:
lowerCAmelCase__ = node.left if value < node.value else node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
lowerCAmelCase__ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ = node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
lowerCAmelCase__ = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ = self.root
while node.left is not None:
lowerCAmelCase__ = node.left
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self.search(lowerCamelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCamelCase_ , lowerCamelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCamelCase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCamelCase_ , node.left )
else:
lowerCAmelCase__ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keeps the tree structure
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if node:
self.inorder(lowerCamelCase_ , node.left )
arr.append(node.value )
self.inorder(lowerCamelCase_ , node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = []
self.inorder(lowerCamelCase_ , lowerCamelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def _snake_case ( A ) -> list[Node]:
lowerCAmelCase__ = []
if curr_node is not None:
lowerCAmelCase__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _snake_case ( ) -> None:
lowerCAmelCase__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ = BinarySearchTree()
for i in testlist:
t.insert(A )
# Prints all the elements of the list in order traversal
print(A )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(A )
print(A )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
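# Illustrative check (a sketch, reusing the names from the example function above) of
# the binary-search-tree invariant the code relies on: values in a node's left subtree
# are smaller and values in its right subtree are larger, so the minimum is reached by
# walking only left and the maximum by walking only right.
def _bst_invariant_demo() -> None:
    values = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    tree = BinarySearchTree()
    tree.insert(*values)
    assert tree.get_min().value == min(values)  # leftmost node  # type: ignore
    assert tree.get_max().value == max(values)  # rightmost node  # type: ignore
    assert tree.search(13) is not None
    assert tree.search(99) is None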
| 228
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a =pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
inspect_dataset(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[Any] = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
inspect_metric(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : int = get_dataset_config_info(lowerCamelCase__ , config_name=lowerCamelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
with pytest.raises(lowerCamelCase__ ):
get_dataset_config_info(lowerCamelCase__ , config_name=lowerCamelCase__ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Tuple = get_dataset_config_names(lowerCamelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : Tuple = get_dataset_infos(lowerCamelCase__ )
assert list(infos.keys() ) == expected_configs
__lowerCamelCase : List[Any] = expected_configs[0]
assert expected_config in infos
__lowerCamelCase : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : str = get_dataset_infos(lowerCamelCase__ )
assert expected_config in infos
__lowerCamelCase : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
with pytest.raises(lowerCamelCase__ ):
get_dataset_split_names(lowerCamelCase__ , config_name=lowerCamelCase__ )
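# A standalone sketch (requires network access) of the inspection helpers these tests
# parametrize over, using only functions imported at the top of this module; the
# dataset name and expected values mirror the fixtures above.
if __name__ == "__main__":
    config_names = get_dataset_config_names('squad')
    print(config_names)  # expected to include 'plain_text'
    split_names = get_dataset_split_names('squad', config_name='plain_text')
    print(split_names)  # expected: ['train', 'validation']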
| 73
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""audio_values""", """audio_mask"""]
def __init__( self :List[str] , lowerCamelCase_ :List[str]=2_048 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=[16, 16] , lowerCamelCase_ :str=128 , lowerCamelCase_ :Union[str, Any]=44_100 , lowerCamelCase_ :Optional[Any]=86 , lowerCamelCase_ :Dict=2_048 , lowerCamelCase_ :Union[str, Any]=0.0 , **lowerCamelCase_ :Tuple , ):
"""simple docstring"""
super().__init__(
feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCamelCase__ : List[str] =spectrogram_length
lowerCamelCase__ : Dict =num_channels
lowerCamelCase__ : List[Any] =patch_size
lowerCamelCase__ : Union[str, Any] =feature_size // self.patch_size[1]
lowerCamelCase__ : int =n_fft
lowerCamelCase__ : List[str] =sampling_rate // hop_length_to_sampling_rate
lowerCamelCase__ : str =sampling_rate
lowerCamelCase__ : int =padding_value
lowerCamelCase__ : Dict =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCamelCase_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCamelCase_ , norm='slaney' , mel_scale='slaney' , ).T
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :np.array ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =spectrogram(
lowerCamelCase_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
lowerCamelCase__ : Any =log_spec[:, :-1]
lowerCamelCase__ : Tuple =log_spec - 20.0
lowerCamelCase__ : List[str] =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self :Optional[Any] , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , **lowerCamelCase_ :Tuple , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCamelCase__ : Dict =isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase__ : Union[str, Any] =is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : Optional[Any] =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
lowerCamelCase__ : Optional[Any] =np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Union[str, Any] =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : List[str] =[np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase__ : Any =[
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCamelCase_ ):
lowerCamelCase__ : Dict =[np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase__ : Optional[Any] =max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase__ : Any =[
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase__ : Union[str, Any] =np.array(lowerCamelCase_ ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase__ : Tuple =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase__ : str =np.ones([len(lowerCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase__ : Dict =padded_audio_features * self.padding_value
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ : Union[str, Any] =audio_features[i]
lowerCamelCase__ : Union[str, Any] =feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase__ : int ={'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowerCamelCase__ : Tuple ={'audio_values': padded_audio_features}
lowerCamelCase__ : Union[str, Any] =BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
return encoded_inputs
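# A hedged usage sketch of the feature extractor defined above (named `A_` in this
# file). The keyword names follow the attributes the `__call__` body reads
# (sampling_rate, return_tensors, return_attention_mask) and are assumptions about the
# original signature; one second of silence at 44.1 kHz is turned into padded log-mel
# patches plus an attention mask.
if __name__ == "__main__":
    extractor = A_()
    dummy_audio = np.zeros(44_100, dtype=np.float32)
    features = extractor(
        dummy_audio, sampling_rate=44_100, return_tensors="np", return_attention_mask=True
    )
    print(features["audio_values"].shape, features["audio_mask"].shape)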
| 126
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Tuple , *UpperCamelCase_: Dict , **UpperCamelCase_: Optional[int] ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """decord""" )
self.check_model_type(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = {}
if frame_sampling_rate is not None:
__lowerCamelCase = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase = num_frames
__lowerCamelCase = {}
if top_k is not None:
__lowerCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[str, List[str]] , **UpperCamelCase_: str ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=1 ):
if num_frames is None:
__lowerCamelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCamelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
__lowerCamelCase = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
__lowerCamelCase = 0
__lowerCamelCase = num_frames * frame_sampling_rate - 1
__lowerCamelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
__lowerCamelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any ):
__lowerCamelCase = self.model(**UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__lowerCamelCase = scores.tolist()
__lowerCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 29
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
lowerCAmelCase__ = KandinskyImgaImgPipeline
lowerCAmelCase__ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
lowerCAmelCase__ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
lowerCAmelCase__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase__ = False
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return 100
@property
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase__ : Tuple = MultilingualCLIP(__lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : List[Any] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.dummy_text_encoder
UpperCAmelCase__ : Tuple = self.dummy_tokenizer
UpperCAmelCase__ : Dict = self.dummy_unet
UpperCAmelCase__ : int = self.dummy_movq
UpperCAmelCase__ : Any = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase__ : Optional[Any] = DDIMScheduler(**__lowerCAmelCase )
UpperCAmelCase__ : List[str] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : str , _A : Any , _A : int=0 ):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
UpperCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Dict = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(__lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase__ : Dict = torch.manual_seed(__lowerCAmelCase )
else:
UpperCAmelCase__ : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
UpperCAmelCase__ : Tuple = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = '''cpu'''
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Optional[int] = self.pipeline_class(**__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Dict = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : int = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase__ : List[Any] = '''A red cartoon frog, 4k'''
UpperCAmelCase__ : int = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
UpperCAmelCase__ : List[Any] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[Any] = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase__ : List[str] = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase__ : List[Any] = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase__ : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 181
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
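# Example invocation of this conversion script (all paths are placeholders; the script
# file name is assumed — only the three flags come from the argparse definition above):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/lxmert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin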
| 72
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
create_state_space_tree(UpperCamelCase , [] , 0 )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if index == len(UpperCamelCase ):
print(UpperCamelCase )
return
create_state_space_tree(UpperCamelCase , UpperCamelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(UpperCamelCase , UpperCamelCase , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
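# For reference, the second call above explores the "skip this element" branch before
# the "keep this element" branch, so for ['A', 'B', 'C'] the printed subsequences are:
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']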
| 184
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
_lowerCAmelCase = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
_lowerCAmelCase = {
'''ctrl''': 256,
}
_lowerCAmelCase = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Dict = set()
lowerCAmelCase__ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ : str = char
lowerCAmelCase__ : int = set(UpperCamelCase )
return pairs
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : str = CONTROL_CODES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[Any]:
super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase )
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ : List[Any] = json.load(__UpperCAmelCase )
lowerCAmelCase__ : str = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : Any = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ : Tuple = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : int = {}
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return len(self.encoder )
def UpperCAmelCase_ ( self ) -> Optional[int]:
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : int = tuple(__UpperCAmelCase )
lowerCAmelCase__ : str = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
lowerCAmelCase__ : Tuple = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ : Tuple = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = bigram
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : int = 0
while i < len(__UpperCAmelCase ):
try:
lowerCAmelCase__ : Any = word.index(__UpperCAmelCase ,__UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : Any = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : Any = tuple(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
lowerCAmelCase__ : List[str] = get_pairs(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = """@@ """.join(__UpperCAmelCase )
lowerCAmelCase__ : int = word[:-4]
lowerCAmelCase__ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : Optional[int] = re.findall(R"""\S+\n?""" ,__UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
return self.decoder.get(__UpperCAmelCase ,self.unk_token )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[int] = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip()
return out_string
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Tuple = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : str = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCAmelCase ,ensure_ascii=__UpperCAmelCase ) + """\n""" )
lowerCAmelCase__ : Optional[int] = 0
with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase__ : List[str] = token_index
writer.write(""" """.join(__UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
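# Illustrative note on the pair-extraction helper defined near the top of this file:
# for the symbol tuple ('l', 'o', 'w', 'e', 'r</w>') it returns the set of adjacent
# pairs {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}. The BPE loop above then
# repeatedly merges the lowest-ranked pair, joins the resulting pieces with "@@ " and
# strips the trailing "</w>".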
| 184
| 1
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
if not numbers:
return 0
if not isinstance(UpperCamelCase_ , (list, tuple) ) or not all(
isinstance(UpperCamelCase_ , UpperCamelCase_ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
__SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = numbers[0]
for i in range(1 , len(UpperCamelCase_ ) ):
# update the maximum and minimum subarray products
__SCREAMING_SNAKE_CASE = numbers[i]
if number < 0:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = min_till_now, max_till_now
__SCREAMING_SNAKE_CASE = max(UpperCamelCase_ , max_till_now * number )
__SCREAMING_SNAKE_CASE = min(UpperCamelCase_ , min_till_now * number )
# update the maximum product found till now
__SCREAMING_SNAKE_CASE = max(UpperCamelCase_ , UpperCamelCase_ )
return max_prod
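# Worked examples for the classic min/max running-product algorithm implemented above:
# for [2, 3, -2, 4] the best contiguous product is 6 (from the subarray [2, 3]), and
# for [-2, 0, -1] it is 0. These values describe the intended algorithm and are worth
# re-checking against the implementation.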
| 100
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
__SCREAMING_SNAKE_CASE = False
if num < 0:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = -num
__SCREAMING_SNAKE_CASE = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(UpperCamelCase_ ) for e in binary )
return "0b" + "".join(str(UpperCamelCase_ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
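# A few concrete values for the conversion above (the sign is kept as a leading "-"
# rather than using two's complement): 0 -> "0b0", 1 -> "0b1", 10 -> "0b1010",
# -10 -> "-0b1010".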
| 100
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=2_24 , __UpperCAmelCase=10_00 , __UpperCAmelCase=[3, 3, 6, 4] , __UpperCAmelCase=[48, 56, 1_12, 2_20] , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =num_labels
_lowerCAmelCase =image_size
_lowerCAmelCase =layer_depths
_lowerCAmelCase =embed_dims
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> Any:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCAmelCase , layer_scale_init_value=1e-5 , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_lowerCAmelCase =SwiftFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =SwiftFormerForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_lowerCAmelCase =SwiftFormerForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ) -> List[str]:
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =self.prepare_config_and_inputs()
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =SwiftFormerModelTester(self )
_lowerCAmelCase =ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> Optional[Any]:
pass
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =SwiftFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _lowerCAmelCase ( self ) -> Tuple:
pass
def _lowerCAmelCase ( self ) -> str:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =8
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__UpperCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
def _config_zero_init(__UpperCAmelCase ):
_lowerCAmelCase =copy.deepcopy(__UpperCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__UpperCAmelCase , __UpperCAmelCase , 1e-10 )
if isinstance(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ):
_lowerCAmelCase =_config_zero_init(getattr(__UpperCAmelCase , __UpperCAmelCase ) )
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return configs_no_init
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =_config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCamelCase() -> List[str]:
_lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> str:
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__UpperCAmelCase )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
# verify the logits
_lowerCAmelCase =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 341
|
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
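# A hedged, self-contained sketch (not part of the metric above) of how the returned fields fit
# together: corpus BLEU is the brevity penalty times the geometric mean of the modified n-gram
# precisions p_1..p_N. The helper name below is illustrative only.
from math import exp, log

def combine_bleu_fields(precisions , brevity_penalty ):
    # geometric mean of the n-gram precisions, scaled by the brevity penalty
    if min(precisions ) == 0:
        return 0.0
    geo_mean = exp(sum(log(p ) for p in precisions ) / len(precisions ) )
    return brevity_penalty * geo_mean

# e.g. perfect 1- to 4-gram precisions and no length penalty reproduce the docstring's score of 1.0
assert combine_bleu_fields([1.0, 1.0, 1.0, 1.0] , brevity_penalty=1.0 ) == 1.0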
| 341
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCamelCase ( self : List[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[Any] = ort.SessionOptions()
snake_case__ : List[Any] = False
return options
def lowerCamelCase ( self : str ):
snake_case__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : List[Any] = """A red cat sitting on a park bench"""
snake_case__ : Optional[Any] = np.random.RandomState(0 )
snake_case__ : Optional[int] = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : Optional[Any] = output.images
snake_case__ : Dict = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case__ : Optional[int] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase ( self : List[str] ):
snake_case__ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case__ : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case__ : Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : List[Any] = """A red cat sitting on a park bench"""
snake_case__ : Optional[int] = np.random.RandomState(0 )
snake_case__ : Optional[int] = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : Tuple = output.images
snake_case__ : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case__ : Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 35
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax( outputs ):
    # numerically stable softmax over the last axis
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["""second_text"""] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Optional[int] = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''switch_transformers'''
a__ =['''past_key_values''']
a__ ={'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , A=3_2_1_2_8 , A=7_6_8 , A=6_4 , A=2_0_4_8 , A=6_4 , A=1_2 , A=3 , A=1_2 , A=3 , A=1_2 , A=8 , A=False , A=0.01 , A="float32" , A=False , A=3_2 , A=1_2_8 , A=0.1 , A=1E-6 , A=0.001 , A=0.001 , A=1.0 , A="relu" , A=True , A=False , A=True , A=0 , A=1 , **A , ) -> Any:
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : int = d_model
_UpperCAmelCase : str = d_kv
_UpperCAmelCase : Tuple = d_ff
_UpperCAmelCase : Any = num_sparse_encoder_layers
_UpperCAmelCase : Any = num_layers
_UpperCAmelCase : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_UpperCAmelCase : int = num_sparse_decoder_layers
        # This tells us how many encoder layers there are between consecutive sparse layers.
if self.num_sparse_encoder_layers > 0:
_UpperCAmelCase : int = self.num_layers // self.num_sparse_encoder_layers
else:
_UpperCAmelCase : str = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how many decoder layers there are between consecutive sparse layers.
if self.num_sparse_decoder_layers > 0:
_UpperCAmelCase : List[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_UpperCAmelCase : Union[str, Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers
_UpperCAmelCase : int = num_heads
_UpperCAmelCase : Union[str, Any] = num_experts
_UpperCAmelCase : Dict = expert_capacity
_UpperCAmelCase : List[Any] = router_bias
_UpperCAmelCase : Dict = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
_UpperCAmelCase : Optional[int] = router_dtype
_UpperCAmelCase : Dict = router_ignore_padding_tokens
_UpperCAmelCase : Dict = relative_attention_num_buckets
_UpperCAmelCase : int = relative_attention_max_distance
_UpperCAmelCase : List[str] = dropout_rate
_UpperCAmelCase : Optional[Any] = layer_norm_epsilon
_UpperCAmelCase : List[str] = initializer_factor
_UpperCAmelCase : List[Any] = feed_forward_proj
_UpperCAmelCase : Dict = use_cache
_UpperCAmelCase : Any = add_router_probs
_UpperCAmelCase : int = router_z_loss_coef
_UpperCAmelCase : Tuple = router_aux_loss_coef
_UpperCAmelCase : str = self.feed_forward_proj.split('''-''' )
_UpperCAmelCase : List[Any] = act_info[-1]
_UpperCAmelCase : Any = act_info[0] == '''gated'''
if len(_snake_case ) > 1 and act_info[0] != "gated" or len(_snake_case ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_UpperCAmelCase : int = '''gelu_new'''
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , **_snake_case , )
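# A small illustrative check (not from the original configuration file) of the sparse-step
# arithmetic above: with 12 layers and 3 sparse layers, every 4th layer is a sparse (MoE) layer.
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers if num_sparse_encoder_layers > 0 else num_layers
assert encoder_sparse_step == 4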
| 357
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass (frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass (frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass (frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass (frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak (frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf (frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf (frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
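# A hedged, self-contained sketch (added for illustration) of what these coefficients mean: a
# second-order (biquad) section realises y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2]
# - a1*y[n-1] - a2*y[n-2]) / a0, which is what the IIRFilter helper is expected to apply.
def biquad_apply(b_coeffs , a_coeffs , samples ):
    # normalise by a0 and run the direct-form I difference equation
    b0, b1, b2 = (c / a_coeffs[0] for c in b_coeffs)
    a1, a2 = a_coeffs[1] / a_coeffs[0], a_coeffs[2] / a_coeffs[0]
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x in samples:
        y = b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2
        x2, x1 = x1, x
        y2, y1 = y1, y
        out.append(y )
    return out
# e.g. the impulse response of a 1 kHz low-pass at 48 kHz, built from the same coefficients as make_lowpass:
w0 = tau * 1_000 / 48_000
alpha = sin(w0 ) / (2 * (1 / sqrt(2 )))
lowpass_b = [(1 - cos(w0 )) / 2, 1 - cos(w0 ), (1 - cos(w0 )) / 2]
lowpass_a = [1 + alpha, -2 * cos(w0 ), 1 - alpha]
impulse_response = biquad_apply(lowpass_b , lowpass_a , [1.0] + [0.0] * 7 )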
| 68
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class snake_case :
"""simple docstring"""
def __init__( self : Dict , __A : Tuple , ):
__UpperCamelCase = parent
__UpperCamelCase = 1_3
__UpperCamelCase = 7
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = 2
__UpperCamelCase = 9_9
__UpperCamelCase = 0
__UpperCamelCase = 3_2
__UpperCamelCase = 2
__UpperCamelCase = 4
__UpperCamelCase = 0.1
__UpperCamelCase = 0.1
__UpperCamelCase = 5_1_2
__UpperCamelCase = 1_6
__UpperCamelCase = 2
__UpperCamelCase = 0.02
__UpperCamelCase = 3
__UpperCamelCase = 4
__UpperCamelCase = 'last'
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = 0
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase = None
if self.use_input_lengths:
__UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCamelCase ( self : Any , __A : Dict , __A : int , __A : Tuple , __A : Optional[int] , __A : Optional[int] , __A : Tuple , __A : List[str] , __A : Tuple , __A : Optional[int] , ):
__UpperCamelCase = TFFlaubertModel(config=__A )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__A )
__UpperCamelCase = [input_ids, input_mask]
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , __A : Any , __A : str , __A : List[str] , __A : Tuple , __A : List[str] , __A : Any , __A : str , __A : Dict , __A : int , ):
__UpperCamelCase = TFFlaubertWithLMHeadModel(__A )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Union[str, Any] , __A : int , __A : Tuple , __A : Dict , __A : int , __A : Tuple , __A : int , __A : Tuple , __A : Union[str, Any] , __A : Any , ):
__UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__A )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Dict , __A : List[str] , __A : List[Any] , __A : List[Any] , __A : Union[str, Any] , __A : str , __A : str , __A : List[str] , __A : Union[str, Any] , __A : List[Any] , ):
__UpperCamelCase = TFFlaubertForSequenceClassification(__A )
__UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : List[str] , __A : List[str] , __A : Dict , __A : Union[str, Any] , __A : List[Any] , __A : List[Any] , __A : str , __A : int , __A : int , __A : Union[str, Any] , ):
__UpperCamelCase = self.num_labels
__UpperCamelCase = TFFlaubertForTokenClassification(config=__A )
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : int , __A : Dict , __A : List[str] , __A : List[str] , __A : Optional[Any] , __A : Union[str, Any] , __A : Optional[int] , __A : List[str] , __A : Optional[Any] , __A : Dict , ):
__UpperCamelCase = self.num_choices
__UpperCamelCase = TFFlaubertForMultipleChoice(config=__A )
__UpperCamelCase = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[Any] =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[str] =False
SCREAMING_SNAKE_CASE_ : Optional[int] =False
def _lowerCamelCase ( self : str , __A : Any , __A : int , __A : Optional[int] , __A : Tuple , __A : Optional[int] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = TFFlaubertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__A , emb_dim=3_7 )
def _lowerCamelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )
@slow
def _lowerCamelCase ( self : Optional[int] ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFFlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase = model(__A )[0]
__UpperCamelCase = tf.TensorShape((1, 8, 5_1_2) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
__UpperCamelCase = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 53
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0 , len(data) - 1)
        b = random.randint(0 , len(data) - 1)
        data[a] , data[b] = data[b], data[a]
    return data
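# For comparison, a hedged sketch of the textbook (Durstenfeld) Fisher-Yates variant: it walks the
# list from the end and swaps each position with a not-yet-fixed index, which yields an unbiased
# permutation, whereas the random-pair swapping above is a simpler approximation.
def fisher_yates_shuffle_classic(data: list) -> list[Any]:
    for i in range(len(data) - 1 , 0 , -1):
        j = random.randint(0 , i)
        data[i] , data[j] = data[j], data[i]
    return data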
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = [0, 1, 2, 3, 4, 5, 6, 7]
_SCREAMING_SNAKE_CASE = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 180
| 0
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ,a_=13 ,a_=7 ,a_=True ,a_=True ,a_=True ,a_=True ,a_=99 ,a_=24 ,a_=2 ,a_=6 ,a_=37 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=512 ,a_=16 ,a_=2 ,a_=0.02 ,a_=3 ,a_=None ,a_=1_000 ,) -> Any:
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : Tuple = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Dict = use_input_mask
_UpperCAmelCase : Dict = use_token_type_ids
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : int = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_labels
_UpperCAmelCase : List[Any] = scope
_UpperCAmelCase : str = range_bbox
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase : str = bbox[i, j, 3]
_UpperCAmelCase : List[str] = bbox[i, j, 1]
_UpperCAmelCase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase : Dict = bbox[i, j, 2]
_UpperCAmelCase : Tuple = bbox[i, j, 0]
_UpperCAmelCase : Optional[Any] = t
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_UpperCAmelCase : str = None
if self.use_token_type_ids:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_UpperCAmelCase : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Optional[int]:
_UpperCAmelCase : Optional[int] = LiltModel(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(a_ ,bbox=a_ ,attention_mask=a_ ,token_type_ids=a_ )
_UpperCAmelCase : Union[str, Any] = model(a_ ,bbox=a_ ,token_type_ids=a_ )
_UpperCAmelCase : Tuple = model(a_ ,bbox=a_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> str:
_UpperCAmelCase : str = self.num_labels
_UpperCAmelCase : Union[str, Any] = LiltForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : Tuple = model(
a_ ,bbox=a_ ,attention_mask=a_ ,token_type_ids=a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,) -> Union[str, Any]:
_UpperCAmelCase : str = LiltForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
_UpperCAmelCase : List[Any] = model(
a_ ,bbox=a_ ,attention_mask=a_ ,token_type_ids=a_ ,start_positions=a_ ,end_positions=a_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Any = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,
) : List[Any] = config_and_inputs
_UpperCAmelCase : str = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Tuple:
return True
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : str = LiltModelTester(self )
_UpperCAmelCase : str = ConfigTester(self ,config_class=a_ ,hidden_size=37 )
def _snake_case ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _snake_case ( self ) -> int:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*a_ )
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@slow
def _snake_case ( self ) -> Union[str, Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = LiltModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@slow
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> str:
_UpperCAmelCase : int = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(a_ )
_UpperCAmelCase : Tuple = torch.tensor([[1, 2]] ,device=a_ )
_UpperCAmelCase : Dict = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=a_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : str = model(input_ids=a_ ,bbox=a_ )
_UpperCAmelCase : Tuple = torch.Size([1, 2, 768] )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] ,device=a_ ,)
self.assertTrue(outputs.last_hidden_state.shape ,a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,a_ ,atol=1E-3 ) )
| 349
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Any = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 347
|
'''simple docstring'''
def actual_power( a , b ) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a , b ) -> float:
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
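# A hedged note on the recursion above: calling actual_power twice per level recomputes the same
# half-power, so it performs O(b) multiplications. Caching the half once (an illustrative sketch,
# not part of the original) restores the O(log b) cost of exponentiation by squaring.
def fast_actual_power( a , b ) -> int:
    if b == 0:
        return 1
    half = fast_actual_power(a , int(b / 2 ) )
    return half * half if (b % 2) == 0 else a * half * half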
if __name__ == "__main__":
print(power(-2, -3))
| 265
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
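# Added note, illustration only: the _LazyModule registered above defers the heavy
# torch/tf/flax imports until a symbol is first accessed. The helper below is a
# hypothetical sketch of that idea (it is not part of the transformers API): a PEP 562
# style lookup that imports the owning submodule on demand.
_LAZY_TARGETS_SKETCH = {"XLMRobertaModel": "modeling_xlm_roberta"}
def _lazy_lookup_sketch(name):
    import importlib
    if name not in _LAZY_TARGETS_SKETCH:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{_LAZY_TARGETS_SKETCH[name]}", __name__)  # imported only now
    return getattr(submodule, name)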
| 363
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : str = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : str = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = 'left'
# Define PAD Token = EOS Token = 50256
snake_case = tokenizer.eos_token
snake_case = model.config.eos_token_id
# use different length sentences to test batching
snake_case = [
'Hello, my dog is a little',
'Today, I',
]
snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase )
snake_case = inputs['input_ids'].to(lowerCAmelCase )
snake_case = model.generate(
input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , )
snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase )
snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BioGptModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = 'multi_label_classification'
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case = model(lowerCAmelCase )[0]
snake_case = 4_23_84
snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase )
snake_case = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
torch.manual_seed(0 )
snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase )
snake_case = model.generate(
**lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , )
snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase )
snake_case = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 149
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCamelCase__ = 5_0_0_0_0
UpperCamelCase__ = 5_0_0_0
UpperCamelCase__ , UpperCamelCase__ = os.path.split(__file__)
UpperCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase__ : Any = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : List[Any] = dataset[i : i + batch_size]
def a__ ( ) -> int:
UpperCAmelCase__ : List[Any] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
UpperCAmelCase__ : Optional[int] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
UpperCAmelCase__ : Union[str, Any] = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
UpperCAmelCase__ : Tuple = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
UpperCAmelCase__ : int = generate_example_dataset(
os.path.join(lowerCAmelCase__ , '''dataset.arrow''' ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={'''list''': (1_00,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase__ : Dict = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print('''shuffling dataset''' )
UpperCAmelCase__ : Dict = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase__ : Any = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''wb''' ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
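# Added note: the @get_duration decorator used above lives in a local utils module that
# is not shown here. A minimal sketch of what such a timing decorator could look like
# (an assumption, not the actual helper):
import functools
import timeit
def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # elapsed seconds, recorded as the benchmark value
    return wrapper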
| 181
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Any , _A : int , _A : str ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : Union[str, Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Dict = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : Optional[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : List[str] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Union[str, Any] = int(_A )
UpperCAmelCase__ : Any = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : int = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : Union[str, Any] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Any = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : Union[str, Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
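# Added usage sketch, illustration only: the checkpoint name below is an assumption, and
# the call simply mirrors the __call__ signature defined above (batch_size,
# num_inference_steps); the output exposes the generated waveforms via .audios.
from diffusers import DiffusionPipeline
_audio_pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # assumed 1D-UNet audio checkpoint
_audio_out = _audio_pipe(batch_size=1, num_inference_steps=100)
_waveform = _audio_out.audios[0]  # numpy array shaped (channels, samples), values in [-1, 1]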
| 181
| 1
|
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : list[list[str]] = [[] for _ in range(lowerCamelCase_ )]
_lowercase : str = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(lowerCamelCase_ ) <= key:
return input_string
for position, character in enumerate(lowerCamelCase_ ):
_lowercase : int = position % (lowest * 2) # puts it in bounds
_lowercase : Any = min(lowerCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCamelCase_ )
_lowercase : Tuple = [''.join(lowerCamelCase_ ) for row in temp_grid]
_lowercase : Union[str, Any] = ''.join(lowerCamelCase_ )
return output_string
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = []
_lowercase : str = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
_lowercase : list[list[str]] = [[] for _ in range(lowerCamelCase_ )] # generates template
for position in range(len(lowerCamelCase_ ) ):
_lowercase : int = position % (lowest * 2) # puts it in bounds
_lowercase : Optional[Any] = min(lowerCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
_lowercase : Optional[int] = 0
for row in temp_grid: # fills in the characters
_lowercase : List[Any] = input_string[counter : counter + len(lowerCamelCase_ )]
grid.append(list(lowerCamelCase_ ) )
counter += len(lowerCamelCase_ )
_lowercase : Tuple = '' # reads as zigzag
for position in range(len(lowerCamelCase_ ) ):
_lowercase : Optional[Any] = position % (lowest * 2) # puts it in bounds
_lowercase : Optional[Any] = min(lowerCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCamelCase_( lowerCamelCase_ ) -> dict[int, str]:
_lowercase : Dict = {}
for key_guess in range(1 , len(lowerCamelCase_ ) ): # tries every key
_lowercase : Optional[int] = decrypt(lowerCamelCase_ , lowerCamelCase_ )
return results
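# Added illustration (self-contained, independent of the obfuscated names above): the
# core of the rail-fence transposition is mapping each character position to a row
# index that bounces between 0 and key - 1; encrypt, decrypt and bruteforce all rely on
# this same zigzag pattern.
def _zigzag_rows_sketch(length: int, key: int) -> list[int]:
    lowest = key - 1
    rows = []
    for position in range(length):
        num = position % (lowest * 2)  # wrap around one full down-and-up cycle
        rows.append(min(num, lowest * 2 - num))  # mirror the second half of the cycle
    return rows

assert _zigzag_rows_sketch(7, 3) == [0, 1, 2, 1, 0, 1, 2]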
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : str = logging.getLogger()
def UpperCamelCase_( ) -> Any:
_lowercase : int = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowercase : Optional[Any] = parser.parse_args()
return args.f
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> None:
"""simple docstring"""
_lowercase : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, 'run_glue_deebert.py')
with patch.object(lowerCamelCase, 'argv', lowerCamelCase):
_lowercase : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase, 0.6_6_6)
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
_lowercase : Union[str, Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase)
| 84
| 0
|
def _A ( _lowercase ) -> list[list[float]]:
"""simple docstring"""
__UpperCamelCase = []
for data in source_data:
for i, el in enumerate(_lowercase ):
if len(_lowercase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_lowercase ) )
return data_lists
def _A ( _lowercase , _lowercase ) -> list[list[float]]:
"""simple docstring"""
__UpperCamelCase = []
for dlist, weight in zip(_lowercase , _lowercase ):
__UpperCamelCase = min(_lowercase )
__UpperCamelCase = max(_lowercase )
__UpperCamelCase = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__UpperCamelCase = f'''Invalid weight of {weight:f} provided'''
raise ValueError(_lowercase )
score_lists.append(_lowercase )
return score_lists
def _A ( _lowercase ) -> list[float]:
"""simple docstring"""
__UpperCamelCase = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowercase ):
__UpperCamelCase = final_scores[j] + ele
return final_scores
def _A ( _lowercase , _lowercase ) -> list[list[float]]:
"""simple docstring"""
__UpperCamelCase = get_data(_lowercase )
__UpperCamelCase = calculate_each_score(_lowercase , _lowercase )
__UpperCamelCase = generate_final_scores(_lowercase )
# append scores to source data
for i, ele in enumerate(_lowercase ):
source_data[i].append(_lowercase )
return source_data
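# Added worked example, illustration only (self-contained, since the four functions above
# all share one obfuscated name): three vendors scored on (price, quality) with weights
# (0, 1) - weight 0 means lower is better (the normalized score is inverted), weight 1
# means higher is better.
_vendors = [[100.0, 9.0], [80.0, 3.0], [90.0, 8.0]]
_weights = [0, 1]
_totals = [0.0 for _ in _vendors]
for _col, _w in zip(zip(*_vendors), _weights):
    _lo, _hi = min(_col), max(_col)
    for _i, _v in enumerate(_col):
        _norm = (_v - _lo) / (_hi - _lo)
        _totals[_i] += (1 - _norm) if _w == 0 else _norm
assert max(range(len(_vendors)), key=lambda i: _totals[i]) == 2  # vendor 2 balances price and quality best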
| 310
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__A, '''depth_multiplier''' ) )
class __UpperCAmelCase :
def __init__( self : int, __A : List[Any], __A : str=1_3, __A : Dict=3, __A : int=3_2, __A : int=0.2_5, __A : List[str]=8, __A : int=8, __A : Dict=6, __A : str=3_2, __A : Any=True, __A : str=True, __A : int=True, __A : Union[str, Any]="relu6", __A : Any=1_2_8_0, __A : List[Any]=0.1, __A : Optional[Any]=0.0_2, __A : Tuple=True, __A : List[Any]=True, __A : str=1_0, __A : Optional[Any]=None, ):
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : str = image_size
UpperCAmelCase : Optional[int] = depth_multiplier
UpperCAmelCase : Union[str, Any] = depth_divisible_by
UpperCAmelCase : Optional[Any] = min_depth
UpperCAmelCase : List[str] = expand_ratio
UpperCAmelCase : Dict = tf_padding
UpperCAmelCase : str = output_stride
UpperCAmelCase : Union[str, Any] = first_layer_is_expansion
UpperCAmelCase : List[Any] = finegrained_output
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase : Optional[Any] = classifier_dropout_prob
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Any = scope
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def __magic_name__ ( self : List[Any], __A : Dict, __A : Optional[Any], __A : Optional[int], __A : Union[str, Any] ):
UpperCAmelCase : Any = MobileNetVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
self.parent.assertEqual(
result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )
def __magic_name__ ( self : str, __A : Union[str, Any], __A : Dict, __A : Optional[Any], __A : str ):
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : Any = MobileNetVaForImageClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[Any], __A : Optional[Any], __A : List[str], __A : Dict, __A : Dict ):
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Dict = MobileNetVaForSemanticSegmentation(__A )
model.to(__A )
model.eval()
UpperCAmelCase : Dict = model(__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
UpperCAmelCase : Optional[Any] = model(__A, labels=__A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def __magic_name__ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[Any] = MobileNetVaModelTester(self )
UpperCAmelCase : List[Any] = MobileNetVaConfigTester(self, config_class=__A, has_text_modality=__A )
def __magic_name__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def __magic_name__ ( self : Any ):
pass
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class(__A )
UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Any, __A : Optional[Any], __A : str ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : Optional[Any] = outputs.hidden_states
UpperCAmelCase : List[Any] = 1_6
self.assertEqual(len(__A ), __A )
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = MobileNetVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> int:
UpperCAmelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__A )
UpperCAmelCase : Optional[int] = self.default_image_processor
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : str = model(**__A )
# verify the logits
UpperCAmelCase : int = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __A, atol=1E-4 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
| 336
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
A_ : Optional[int] =TypeVar("""T""")
class __a ( Generic[T] ):
SCREAMING_SNAKE_CASE__ : deque[T] # Cache store of keys
SCREAMING_SNAKE_CASE__ : set[T] # References of the keys in cache
SCREAMING_SNAKE_CASE__ : int = 10 # Maximum capacity of cache
def __init__( self , a__ ):
_lowerCamelCase = deque()
_lowerCamelCase = set()
if not n:
_lowerCamelCase = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
_lowerCamelCase = n
def snake_case_ ( self , a__ ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_lowerCamelCase = self.dq_store.pop()
self.key_reference.remove(a__ )
else:
self.dq_store.remove(a__ )
self.dq_store.appendleft(a__ )
self.key_reference.add(a__ )
def snake_case_ ( self ):
for k in self.dq_store:
print(a__ )
def __repr__( self ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : LRUCache[str | int] =LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 80
|
"""simple docstring"""
from math import factorial, pi
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : int = 30 )-> float:
if not isinstance(snake_case , (int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(snake_case , snake_case ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(snake_case )
    div = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(snake_case ) )
def SCREAMING_SNAKE_CASE_ ( snake_case : float , snake_case : int = 30 )-> float:
if not isinstance(snake_case , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(snake_case , snake_case ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(snake_case )
    div = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
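# Added cross-check, illustration only (it reuses the maclaurin_sin/maclaurin_cos names
# already called in the prints above): after the 2*pi range reduction, 30 series terms
# agree with the math module to well under 1e-9.
from math import cos, isclose, sin
assert isclose(maclaurin_sin(10), sin(10), abs_tol=1e-9)
assert isclose(maclaurin_cos(-5), cos(-5), abs_tol=1e-9)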
| 80
| 1
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
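# Added migration sketch, illustration only (the checkpoint name is an assumption):
# downstream code avoids the deprecation warning by using the replacement class directly.
from transformers import CLIPImageProcessor
_clip_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")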
| 29
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29
| 1
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _snake_case ( _lowercase ):
def __init__( self: Dict , __lowerCamelCase: int , __lowerCamelCase: List[Any]=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Tuple=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: str=False , __lowerCamelCase: str=True , __lowerCamelCase: Dict=99 , __lowerCamelCase: str=32 , __lowerCamelCase: int=5 , __lowerCamelCase: Optional[int]=4 , __lowerCamelCase: Union[str, Any]=64 , __lowerCamelCase: str="gelu" , __lowerCamelCase: str=0.1 , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: str=16 , __lowerCamelCase: List[str]=2 , __lowerCamelCase: Any=0.02 , __lowerCamelCase: Dict=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Optional[Any]=2 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: int=2 , __lowerCamelCase: int=2 , __lowerCamelCase: Tuple=4 , __lowerCamelCase: List[Any]=1 , ) -> List[Any]:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Tuple = use_input_mask
__UpperCAmelCase : int = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : List[str] = vocab_size
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : Any = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : List[str] = scope
__UpperCAmelCase : Optional[int] = q_groups
__UpperCAmelCase : List[Any] = k_groups
__UpperCAmelCase : int = v_groups
__UpperCAmelCase : Optional[Any] = post_attention_groups
__UpperCAmelCase : int = intermediate_groups
__UpperCAmelCase : Optional[Any] = output_groups
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: str ) -> Optional[int]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Optional[int] = SqueezeBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any ) -> Tuple:
__UpperCAmelCase : Optional[Any] = SqueezeBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : List[str] = SqueezeBertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: List[Any] ) -> Dict:
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : Tuple = SqueezeBertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : Any = SqueezeBertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : Union[str, Any] = SqueezeBertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__: Tuple = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__: Any = False
lowerCamelCase__: Optional[int] = True
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
__UpperCAmelCase : List[Any] = SqueezeBertModelTester(self )
__UpperCAmelCase : int = ConfigTester(self , config_class=__lowerCamelCase , dim=37 )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: Optional[Any] ) -> List[Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> Dict:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = SqueezeBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> str:
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 359
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
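# For example, with DECODER_PATTERNS the TF key "pegasus/decoder/layer_0/attention/self/query/kernel"
# is rewritten step by step into "model.decoder.layers.0.self_attn.q_proj.weight".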
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
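    # Hypothetical invocation (the script name and paths below are illustrative only):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path ./bigbird_pegasus_tf_ckpt --save_dir ./bigbird_pegasus_pt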
| 342
| 0
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class A__ ( lowerCamelCase_ ):
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , A_ , )
super().__init__(*A_ , **A_ )
| 52
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCamelCase ="sshleifer/mar_enro_6_3_student"
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=snake_case ,)
SCREAMING_SNAKE_CASE =f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ):
MarianMTModel.from_pretrained(snake_case )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
SCREAMING_SNAKE_CASE =f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
SCREAMING_SNAKE_CASE =['finetune.py'] + bash_script.split() + args
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
SCREAMING_SNAKE_CASE =main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =f'{self.test_file_dir_str}/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
SCREAMING_SNAKE_CASE =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
SCREAMING_SNAKE_CASE =bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16 ' ,' ' )
for k, v in env_vars_to_replace.items():
SCREAMING_SNAKE_CASE =bash_script.replace(snake_case ,str(snake_case ) )
SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE =bash_script.replace('--fp16' ,'' )
SCREAMING_SNAKE_CASE =6
SCREAMING_SNAKE_CASE =(
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(snake_case ,'argv' ,snake_case ):
SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(snake_case )
SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(snake_case ,os.getcwd() )
SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
SCREAMING_SNAKE_CASE =distill_main(snake_case )
# Check metrics
SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
SCREAMING_SNAKE_CASE =metrics['val'][0]
SCREAMING_SNAKE_CASE =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] ,snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
SCREAMING_SNAKE_CASE =os.listdir(snake_case )
SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('.ckpt' )][0]
SCREAMING_SNAKE_CASE =os.path.join(args.output_dir ,snake_case )
SCREAMING_SNAKE_CASE =torch.load(snake_case ,map_location='cpu' )
SCREAMING_SNAKE_CASE ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 334
| 0
|
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
try:
__lowerCAmelCase: Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCAmelCase: Dict = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCAmelCase: Dict = strtobool(__SCREAMING_SNAKE_CASE )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
return unittest.skip("Test was skipped" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(_run_slow_tests , "test is slow" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Any:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Any:
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
if test_case is None:
return partial(__SCREAMING_SNAKE_CASE , version=__SCREAMING_SNAKE_CASE )
return unittest.skipUnless(is_torch_version(">=" , __SCREAMING_SNAKE_CASE ) , F"test requires torch version >= {version}" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Any:
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(__SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(__SCREAMING_SNAKE_CASE )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def a__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(__SCREAMING_SNAKE_CASE )
class snake_case ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = True
@classmethod
def lowercase_ ( cls : Union[str, Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = tempfile.mkdtemp()
@classmethod
def lowercase_ ( cls : Dict)-> Any:
'''simple docstring'''
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir).glob("**/*"):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__lowercase)
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Dict)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: str = mocks if isinstance(__lowercase , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: Dict = AcceleratorState()
__lowerCAmelCase: List[str] = tensor[None].clone().to(state.device )
__lowerCAmelCase: Union[str, Any] = gather(__SCREAMING_SNAKE_CASE ).cpu()
__lowerCAmelCase: Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __SCREAMING_SNAKE_CASE ):
return False
return True
class snake_case :
def __init__( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[str] = returncode
__lowerCAmelCase: Any = stdout
__lowerCAmelCase: List[Any] = stderr
async def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
while True:
__lowerCAmelCase: Dict = await stream.readline()
if line:
callback(__SCREAMING_SNAKE_CASE )
else:
break
async def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False ) -> List[Any]:
if echo:
print("\nRunning: " , " ".join(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: List[str] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__SCREAMING_SNAKE_CASE , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCAmelCase: Optional[Any] = []
__lowerCAmelCase: Optional[int] = []
def tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="" ):
__lowerCAmelCase: Tuple = line.decode("utf-8" ).rstrip()
sink.append(__SCREAMING_SNAKE_CASE )
if not quiet:
print(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , file=__SCREAMING_SNAKE_CASE )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stderr , label="stderr:" ) ) ),
] , timeout=__SCREAMING_SNAKE_CASE , )
return _RunOutput(await p.wait() , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1_8_0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
__lowerCAmelCase: Dict = asyncio.get_event_loop()
__lowerCAmelCase: List[str] = loop.run_until_complete(
_stream_subprocess(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , stdin=__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , quiet=__SCREAMING_SNAKE_CASE , echo=__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Any = " ".join(__SCREAMING_SNAKE_CASE )
if result.returncode > 0:
__lowerCAmelCase: List[str] = "\n".join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class snake_case ( __A ):
pass
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
try:
__lowerCAmelCase: Optional[int] = subprocess.check_output(__SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__SCREAMING_SNAKE_CASE , "decode" ):
__lowerCAmelCase: Optional[int] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 359
|
"""simple docstring"""
import numpy as np
def a__ ( __SCREAMING_SNAKE_CASE ) -> np.array:
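    # numerically equivalent to np.tanh: tanh(x) = 2 * sigmoid(2x) - 1 = 2 / (1 + exp(-2x)) - 1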
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
| 0
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
'''simple docstring'''
    def __init__(self, text: str, pattern: str):
'''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
'''simple docstring'''
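        # bad-character table lookup: return the rightmost index of `char` in the pattern, or -1 if it is absent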
for i in range(self.patLen - 1, -1, -1 ):
if char == self.pattern[i]:
return i
return -1
    def mismatch_in_text(self, current_pos: int) -> int:
'''simple docstring'''
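        # compare the pattern right-to-left against the text window starting at `current_pos`;
        # return the text index of the first mismatch, or -1 if the whole window matches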
for i in range(self.patLen - 1, -1, -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
    def bad_character_heuristic(self):
'''simple docstring'''
# searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
return positions
lowercase_ = 'ABAABA'
lowercase_ = 'AB'
lowercase_ = BoyerMooreSearch(text, pattern)
lowercase_ = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 266
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token))
return stack.pop()
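# Worked example: the postfix expression ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9,
# and division truncates toward zero (e.g. ["-7", "2", "/"] gives -3, not -4).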
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266
| 1
|
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_A : Tuple ='''bert-base-cased'''
_A : Union[str, Any] ='''fp16'''
_A : Dict ='''bf16'''
_A : Any =[FPaa, BFaa]
@require_fsdp
@require_cuda
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
super().setUp()
lowerCamelCase__ : str = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def lowerCamelCase_ ( self: str ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = self.dist_env.copy()
lowerCamelCase__ : Tuple = F'''{i + 1}'''
lowerCamelCase__ : Dict = strategy
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def lowerCamelCase_ ( self: List[str] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = self.dist_env.copy()
lowerCamelCase__ : List[Any] = prefetch_policy
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : str = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def lowerCamelCase_ ( self: Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = self.dist_env.copy()
lowerCamelCase__ : List[str] = state_dict_type
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[int] = AutoModel.from_pretrained(UpperCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCamelCase__ : Any = self.dist_env.copy()
lowerCamelCase__ : str = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCamelCase__ : Any = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
lowerCamelCase__ : List[str] = """2000"""
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCamelCase__ : str = self.dist_env.copy()
lowerCamelCase__ : str = """TRANSFORMER_BASED_WRAP"""
lowerCamelCase__ : str = """T5Layer"""
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
lowerCamelCase__ : Any = self.dist_env.copy()
lowerCamelCase__ : Any = """SIZE_BASED_WRAP"""
lowerCamelCase__ : List[str] = """0"""
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : Any = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowerCamelCase_ ( self: int ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCamelCase__ : str = self.dist_env.copy()
lowerCamelCase__ : Optional[Any] = mp_dtype
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : int = Accelerator()
if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCamelCase__ : List[str] = self.dist_env.copy()
lowerCamelCase__ : Optional[int] = str(UpperCamelCase__ ).lower()
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase__ : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Optional[int] ):
super().setUp()
lowerCamelCase__ : Union[str, Any] = 0.82
lowerCamelCase__ : str = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
lowerCamelCase__ : List[Any] = {
"""multi_gpu_fp16""": 3_200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2_000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1_900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCamelCase__ : Any = 160
lowerCamelCase__ : str = 160
lowerCamelCase__ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase__ : str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : Union[str, Any] = os.path.join(self.test_scripts_folder , """test_performance.py""" )
lowerCamelCase__ : Dict = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
lowerCamelCase__ : Optional[Any] = cmd.copy()
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Dict = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
lowerCamelCase__ : str = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : Optional[Any] = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
lowerCamelCase__ : List[Any] = len(UpperCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCamelCase__ : str = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
lowerCamelCase__ : int = cmd_config[:-1]
lowerCamelCase__ : Tuple = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
lowerCamelCase__ : Optional[Any] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCamelCase__ : Optional[int] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 129
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : Union[str, Any] =logging.get_logger(__name__)
_A : List[Any] ={
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowercase ( _lowercase ):
a = """deformable_detr"""
a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self: Optional[int] , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , UpperCamelCase__: List[str]=3 , UpperCamelCase__: Any=300 , UpperCamelCase__: Optional[int]=1_024 , UpperCamelCase__: int=6 , UpperCamelCase__: str=1_024 , UpperCamelCase__: Optional[Any]=8 , UpperCamelCase__: Optional[Any]=6 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=8 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any="relu" , UpperCamelCase__: Any=256 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Any=0.0 , UpperCamelCase__: List[str]=0.02 , UpperCamelCase__: str=1.0 , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: str="sine" , UpperCamelCase__: Optional[Any]="resnet50" , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Any=4 , UpperCamelCase__: int=4 , UpperCamelCase__: int=False , UpperCamelCase__: Optional[Any]=300 , UpperCamelCase__: str=False , UpperCamelCase__: int=1 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Optional[int]=1 , UpperCamelCase__: int=1 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: str=2 , UpperCamelCase__: str=0.1 , UpperCamelCase__: List[str]=0.25 , UpperCamelCase__: Any=False , **UpperCamelCase__: Optional[Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase__ : Union[str, Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : Tuple = backbone_config.get("""model_type""" )
lowerCamelCase__ : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[Any] = config_class.from_dict(UpperCamelCase__ )
lowerCamelCase__ : Tuple = use_timm_backbone
lowerCamelCase__ : Tuple = backbone_config
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = num_queries
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : str = d_model
lowerCamelCase__ : Dict = encoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : int = encoder_attention_heads
lowerCamelCase__ : str = decoder_ffn_dim
lowerCamelCase__ : Optional[Any] = decoder_layers
lowerCamelCase__ : Optional[int] = decoder_attention_heads
lowerCamelCase__ : int = dropout
lowerCamelCase__ : Optional[Any] = attention_dropout
lowerCamelCase__ : List[Any] = activation_dropout
lowerCamelCase__ : List[Any] = activation_function
lowerCamelCase__ : int = init_std
lowerCamelCase__ : Dict = init_xavier_std
lowerCamelCase__ : Tuple = encoder_layerdrop
lowerCamelCase__ : str = auxiliary_loss
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Tuple = backbone
lowerCamelCase__ : Tuple = use_pretrained_backbone
lowerCamelCase__ : Optional[int] = dilation
# deformable attributes
lowerCamelCase__ : Optional[int] = num_feature_levels
lowerCamelCase__ : Tuple = encoder_n_points
lowerCamelCase__ : Tuple = decoder_n_points
lowerCamelCase__ : Dict = two_stage
lowerCamelCase__ : int = two_stage_num_proposals
lowerCamelCase__ : Any = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowerCamelCase__ : Optional[int] = class_cost
lowerCamelCase__ : List[str] = bbox_cost
lowerCamelCase__ : Any = giou_cost
# Loss coefficients
lowerCamelCase__ : Union[str, Any] = mask_loss_coefficient
lowerCamelCase__ : Tuple = dice_loss_coefficient
lowerCamelCase__ : int = bbox_loss_coefficient
lowerCamelCase__ : Optional[Any] = giou_loss_coefficient
lowerCamelCase__ : Dict = eos_coefficient
lowerCamelCase__ : Any = focal_alpha
lowerCamelCase__ : Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Dict ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self: Any ):
return self.d_model
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase__ : int = self.backbone_config.to_dict()
lowerCamelCase__ : int = self.__class__.model_type
return output
| 129
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''megatron-bert'''
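    # Minimal usage sketch (assumes the upstream class name `MegatronBertConfig`; the values are just the
    # defaults declared below, not a released checkpoint):
    #   config = MegatronBertConfig(vocab_size=29056, hidden_size=1024, num_hidden_layers=24)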
def __init__( self : Dict , __UpperCamelCase : List[Any]=2_9056 , __UpperCamelCase : List[Any]=1024 , __UpperCamelCase : Tuple=24 , __UpperCamelCase : Optional[Any]=16 , __UpperCamelCase : Union[str, Any]=4096 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Optional[Any]=512 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : List[str]=1E-12 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Union[str, Any]="absolute" , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : Any , ) -> Optional[int]:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
| 256
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : int = use_input_mask
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : List[Any] = projection_dim
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Any = dropout
UpperCAmelCase : List[Any] = attention_dropout
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Union[str, Any] = bos_token_id
def _lowercase( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ) -> None:
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
    def setUp( self ) -> None:
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ) -> None:
self.config_tester.run_common_tests()
    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Dict:
pass
@slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ) -> None:
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 265
| 0
|
"""simple docstring"""
from math import sqrt
def solution( limit = 1_0_0_0_0_0_0 ):
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
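# Clarifying note (added): for a cuboid whose longest side is max_cuboid_size and whose two
# shorter sides sum to sum_shortest_sides, the shortest surface path between opposite corners
# has length sqrt(sum_shortest_sides**2 + max_cuboid_size**2). For the classic 6 x 5 x 3 box
# this gives sqrt((5 + 3)**2 + 6**2) = 10, an integer, so that cuboid is counted.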
| 354
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
SCREAMING_SNAKE_CASE__:Any = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
SCREAMING_SNAKE_CASE__:Optional[int] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
SCREAMING_SNAKE_CASE__:Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    return x[0]
def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
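# Example usage (a sketch; the exact output string depends on the input text, so no value is asserted):
#     order = get_frequency_order("Defend the east wall of the castle")
#     score = english_freq_match_score("Defend the east wall of the castle")
# Ordinary English text typically scores close to the maximum of 12 (6 common + 6 uncommon
# letters matched), while random letter sequences usually score much lower.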
| 268
| 0
|
def multiplication_table( number : int , number_of_terms : int ) -> str:
    '''simple docstring'''
    return "\n".join(
        f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
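# For example, multiplication_table(number=5, number_of_terms=3) returns the string:
# 5 * 1 = 5
# 5 * 2 = 10
# 5 * 3 = 15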
| 7
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ):
    url = F"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ) -> None:
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''', None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='''TPU''' )
        print(F'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='''127.0.01''', master_port=use_port, mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function, distributed_type='''MULTI_GPU''' )
                print(F'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args )
def debug_launcher ( function , args=() , num_processes=2 ) -> None:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes, master_addr='''127.0.01''', master_port='''29500''', accelerate_mixed_precision='''no''', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='''yes''', ):
            launcher = PrepareForLaunch(function, debug=True )
            start_processes(launcher, args=args, nprocs=num_processes, start_method='''fork''' )
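# Usage sketch (hedged; `train` is a hypothetical user-defined training function, not part of this file):
#     def train(model, dataloader):
#         ...
#     notebook_launcher(train, args=(model, dataloader), num_processes=2)
# This mirrors how the launcher above is typically invoked from a notebook cell.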
| 18
|
'''simple docstring'''
def present_value ( discount_rate : float , cash_flows : list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value, ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
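# Worked example (follows directly from the function above): present_value(0.1, [100, 100])
# leaves the first cash flow undiscounted and discounts the second by one period,
# giving 100 + 100 / 1.1 = 190.909..., which rounds to 190.91.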
| 18
| 1
|
'''simple docstring'''
def exchange_sort ( numbers : list[int] ):
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
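# Note: exchange sort compares every pair (i, j) with j > i, so it always performs
# n * (n - 1) / 2 comparisons -- O(n^2) time -- regardless of how sorted the input already is.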
| 237
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """char"""
__lowercase = """bpe"""
__lowercase = """wp"""
__lowerCAmelCase : Union[str, Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = ["""image_processor""", """char_tokenizer"""]
__lowercase = """ViTImageProcessor"""
__lowercase = """MgpstrTokenizer"""
def __init__( self :int , lowercase_ :int=None , lowercase_ :List[str]=None , **lowercase_ :List[Any] )-> Optional[Any]:
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
A__ = kwargs.pop("feature_extractor" )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
A__ = tokenizer
A__ = AutoTokenizer.from_pretrained("gpt2" )
A__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowercase_ , lowercase_ )
def __call__( self :Optional[Any] , lowercase_ :Any=None , lowercase_ :Tuple=None , lowercase_ :List[str]=None , **lowercase_ :Union[str, Any] )-> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A__ = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None:
A__ = self.char_tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
A__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :List[str] , lowercase_ :int )-> int:
A__, A__, A__ = sequences
A__ = char_preds.size(0 )
A__, A__ = self._decode_helper(lowercase_ , "char" )
A__, A__ = self._decode_helper(lowercase_ , "bpe" )
A__, A__ = self._decode_helper(lowercase_ , "wp" )
A__ = []
A__ = []
for i in range(lowercase_ ):
A__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
A__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
A__ = scores.index(max(lowercase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A__ = {}
A__ = final_strs
A__ = final_scores
A__ = char_strs
A__ = bpe_strs
A__ = wp_strs
return out
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :str )-> Optional[Any]:
if format == DecodeType.CHARACTER:
A__ = self.char_decode
A__ = 1
A__ = "[s]"
elif format == DecodeType.BPE:
A__ = self.bpe_decode
A__ = 2
A__ = "#"
elif format == DecodeType.WORDPIECE:
A__ = self.wp_decode
A__ = 1_02
A__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
A__, A__ = [], []
A__ = pred_logits.size(0 )
A__ = pred_logits.size(1 )
A__, A__ = pred_logits.topk(1 , dim=-1 , largest=lowercase_ , sorted=lowercase_ )
A__ = preds_index.view(-1 , lowercase_ )[:, 1:]
A__ = decoder(lowercase_ )
A__, A__ = torch.nn.functional.softmax(lowercase_ , dim=2 ).max(dim=2 )
A__ = preds_max_prob[:, 1:]
for index in range(lowercase_ ):
A__ = preds_str[index].find(lowercase_ )
A__ = preds_str[index][:pred_eos]
A__ = preds_index[index].cpu().tolist()
A__ = pred_index.index(lowercase_ ) if eos_token in pred_index else -1
A__ = preds_max_prob[index][: pred_eos_index + 1]
A__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowercase_ )
conf_scores.append(lowercase_ )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> int:
A__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowercase_ )]
return decode_strs
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> List[str]:
return self.bpe_tokenizer.batch_decode(lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[str] )-> Union[str, Any]:
A__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowercase_ )]
return decode_strs
| 237
| 1
|
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __lowerCAmelCase ( BaseOutput):
_lowercase : torch.FloatTensor
_lowercase : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
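# Quick sanity check (a sketch; the shape and bounds follow from the loop above):
#     betas = betas_for_alpha_bar(1000)          # tensor of shape (1000,)
#     assert betas.min() >= 0 and betas.max() <= 0.999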
class __lowerCAmelCase ( SchedulerMixin , ConfigMixin):
_lowercase : List[Any] = 1
@register_to_config
def __init__( self , lowerCAmelCase__ = 1_0_0_0 , lowerCAmelCase__ = 0.00_01 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = 0 , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = 1.0 , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
if kwargs.get("set_alpha_to_one" , _snake_case ) is not None:
a__ : str =(
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , _snake_case , standard_warn=_snake_case )
a__ : List[str] =kwargs["set_alpha_to_one"]
if trained_betas is not None:
a__ : Union[str, Any] =torch.tensor(_snake_case , dtype=torch.floataa )
elif beta_schedule == "linear":
a__ : Union[str, Any] =torch.linspace(_snake_case , _snake_case , _snake_case , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a__ : List[str] =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , _snake_case , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a__ : int =betas_for_alpha_bar(_snake_case )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
a__ : Optional[int] =1.0 - self.betas
a__ : List[str] =torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
a__ : Optional[Any] =torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
a__ : Tuple =1.0
# setable values
a__ : Union[str, Any] =None
a__ : Union[str, Any] =torch.from_numpy(np.arange(0 , _snake_case ).copy().astype(np.intaa ) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[Any]:
'''simple docstring'''
return sample
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> str:
'''simple docstring'''
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
a__ : List[Any] =num_inference_steps
a__ : List[Any] =self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a__ : str =(np.arange(0 , _snake_case ) * step_ratio).round().copy().astype(np.intaa )
a__ : str =torch.from_numpy(_snake_case ).to(_snake_case )
self.timesteps += self.config.steps_offset
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , ) -> Any:
'''simple docstring'''
a__ : Tuple =timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
a__ : List[Any] =self.alphas_cumprod[timestep]
a__ : List[str] =(
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
a__ : Tuple =1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
a__ : str =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
a__ : Optional[Any] =model_output
elif self.config.prediction_type == "sample":
a__ : Union[str, Any] =model_output
a__ : Union[str, Any] =(sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
a__ : Union[str, Any] =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
a__ : List[str] =(alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
a__ : List[str] =pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ : str =alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_snake_case , pred_original_sample=_snake_case )
def __len__( self ) -> str:
'''simple docstring'''
return self.config.num_train_timesteps
| 351
|
import random
from typing import Any
def fisher_yates_shuffle ( data : list ):
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
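# Clarifying note: this variant swaps two independently chosen random indices on every pass,
# rather than the textbook Fisher-Yates step of swapping position i with a random index <= i
# while walking the list once, so it is a simple shuffle approximation rather than a
# provably uniform permutation generator.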
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 148
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : int = None
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = NystromformerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Any = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = NystromformerForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = NystromformerForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : Union[str, Any] = NystromformerForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : int = NystromformerForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.num_choices
_lowerCamelCase : str = NystromformerForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : int = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
snake_case__ : List[str] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[Any] = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[int] = False
snake_case__ : int = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : str = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = NystromformerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Tuple = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
_lowerCamelCase : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_lowerCamelCase : Dict = model(__lowerCAmelCase )[0]
_lowerCamelCase : str = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
_lowerCamelCase : int = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = '''the [MASK] of Belgium is Brussels'''
_lowerCamelCase : str = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
_lowerCamelCase : Any = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
_lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase : List[Any] = model(encoding.input_ids ).logits
_lowerCamelCase : str = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , '''capital''' )
| 72
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class __snake_case ( PretrainedConfig ):
"""simple docstring"""
    model_type = """roberta"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( OnnxConfig ):
"""simple docstring"""
@property
def UpperCamelCase__( self ):
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 179
| 0
|
def catalan ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
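# Worked example (traced from the loop above): each step applies the identity
# C(i) = C(i - 1) * (4 * i - 2) / (i + 1), so catalan(1) = 1, catalan(2) = 1, catalan(3) = 2,
# catalan(4) = 5 and catalan(5) = 14 -- the successive entries of the Catalan sequence
# 1, 1, 2, 5, 14, 42, ...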
| 369
|
def is_automorphic_number ( number : int ) -> bool:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
| 0
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """openbmb/cpm-ant-10b""": 1024,
}
def load_vocab ( vocab_file ) -> collections.OrderedDict:
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        '''simple docstring'''
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        '''simple docstring'''
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer ( PreTrainedTokenizer ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = False
def __init__( self : List[Any] , a : List[Any] , a : Tuple="<d>" , a : int="</d>" , a : List[str]="<s>" , a : Any="</s>" , a : int="<pad>" , a : List[str]="<unk>" , a : Dict="</n>" , a : str="</_>" , a : Union[str, Any]="left" , **a : Optional[int] , ):
'''simple docstring'''
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=a , eod_token=a , bos_token=a , eos_token=a , pad_token=a , unk_token=a , line_token=a , space_token=a , padding_side=a , **a , )
lowerCAmelCase__ : Dict = bod_token
lowerCAmelCase__ : List[Any] = eod_token
lowerCAmelCase__ : Any = load_vocab(a )
lowerCAmelCase__ : Tuple = self.encoder[space_token]
lowerCAmelCase__ : int = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowerCAmelCase__ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a : x[1] ) )
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : int = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return self.encoder["\n"]
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self : Any , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = []
for x in jieba.cut(a , cut_all=a ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a ) )
return output_tokens
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = [i for i in token_ids if i >= 0]
lowerCAmelCase__ : Optional[Any] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a , **a )
def _lowerCamelCase ( self : Tuple , a : Union[str, Any] ):
'''simple docstring'''
return token in self.encoder
def _lowerCamelCase ( self : Any , a : List[str] ):
'''simple docstring'''
return "".join(a )
def _lowerCamelCase ( self : Dict , a : Optional[Any] ):
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self : int , a : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(a , self.unk_token )
def _lowerCamelCase ( self : Optional[int] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if os.path.isdir(a ):
lowerCAmelCase__ : List[str] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase__ : Tuple = (filename_prefix + '-' if filename_prefix else '') + save_directory
lowerCAmelCase__ : List[str] = 0
if " " in self.encoder:
lowerCAmelCase__ : Optional[Any] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
lowerCAmelCase__ : str = self.encoder['\n']
del self.encoder["\n"]
lowerCAmelCase__ : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a : x[1] ) )
with open(a , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase__ : Tuple = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : List[int] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a ))
return [1] + ([0] * len(a ))
| 212
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash :
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate( n , b ):
        '''simple docstring'''
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding( self ):
        '''simple docstring'''
        padding = b'\x80' + b'\x00' * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ):
        '''simple docstring'''
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block( self , block ):
        '''simple docstring'''
        w = list(struct.unpack('>16L' , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash( self ):
        '''simple docstring'''
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash( ) -> None:
    test_string = b'Test String'
    assert SHA1Hash(test_string ).final_hash() == hashlib.sha1(test_string ).hexdigest() # noqa: S324
def main( ) -> None:
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
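# Usage sketch (the digest below is the well-known SHA-1 of b"abc"):
#     SHA1Hash(b"abc").final_hash()  # 'a9993e364706816aba3e25717850c26c9cd0d89d'
# which matches hashlib.sha1(b"abc").hexdigest().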
| 212
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( enum.Enum ):
'''simple docstring'''
__snake_case = 0
__snake_case = 1
@add_end_docstrings(_a )
class lowerCAmelCase_ ( _a ):
'''simple docstring'''
__snake_case = """generated"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase__ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
snake_case_ = {}
if truncation is not None:
snake_case_ = truncation
snake_case_ = generate_kwargs
snake_case_ = {}
if return_tensors is not None and return_type is None:
snake_case_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case_ = return_type
if clean_up_tokenization_spaces is not None:
snake_case_ = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case_ = self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
snake_case_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return True
def UpperCamelCase__ ( self , *_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , _UpperCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
snake_case_ = ([prefix + arg for arg in args[0]],)
snake_case_ = True
elif isinstance(args[0] , _UpperCAmelCase ):
snake_case_ = (prefix + args[0],)
snake_case_ = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
snake_case_ = self.tokenizer(*_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
snake_case_ = super().__call__(*_UpperCAmelCase , **_UpperCAmelCase )
if (
isinstance(args[0] , _UpperCAmelCase )
and all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for el in args[0] )
and all(len(_UpperCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , **_UpperCAmelCase ):
snake_case_ = self._parse_and_tokenize(_UpperCAmelCase , truncation=_UpperCAmelCase , **_UpperCAmelCase )
return inputs
def UpperCamelCase__ ( self , _UpperCAmelCase , **_UpperCAmelCase ):
if self.framework == "pt":
snake_case_ = model_inputs['input_ids'].shape
elif self.framework == "tf":
snake_case_ = tf.shape(model_inputs['''input_ids'''] ).numpy()
snake_case_ = generate_kwargs.get('''min_length''' , self.model.config.min_length )
snake_case_ = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_UpperCAmelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
snake_case_ = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = output_ids.shape[0]
if self.framework == "pt":
snake_case_ = output_ids.reshape(_UpperCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case_ = tf.reshape(_UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=ReturnType.TEXT , _UpperCAmelCase=False ):
snake_case_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case_ = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
snake_case_ = {
F'''{self.return_name}_text''': self.tokenizer.decode(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , )
}
records.append(_UpperCAmelCase )
return records
@add_end_docstrings(_a )
class lowerCAmelCase_ ( _a ):
'''simple docstring'''
__snake_case = """summary"""
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return super().__call__(*_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(_a )
class lowerCAmelCase_ ( _a ):
'''simple docstring'''
__snake_case = """translation"""
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def UpperCamelCase__ ( self , *_UpperCAmelCase , _UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , _UpperCAmelCase=None , _UpperCAmelCase=None ):
if getattr(self.tokenizer , '''_build_translation_inputs''' , _UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*_UpperCAmelCase , return_tensors=self.framework , truncation=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase )
else:
return super()._parse_and_tokenize(*_UpperCAmelCase , truncation=_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
snake_case_ = super()._sanitize_parameters(**_UpperCAmelCase )
if src_lang is not None:
snake_case_ = src_lang
if tgt_lang is not None:
snake_case_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case_ = kwargs.get('''task''' , self.task )
snake_case_ = task.split('''_''' )
if task and len(_UpperCAmelCase ) == 4:
# translation, XX, to YY
snake_case_ = items[1]
snake_case_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return super().__call__(*_UpperCAmelCase , **_UpperCAmelCase )
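# Usage sketch (an addition, not part of the original module): pipelines like the
# summarization and translation classes above are normally obtained through the
# `transformers.pipeline` factory rather than instantiated directly. The default
# checkpoints are downloaded on first use, so treat this as illustrative only.
if __name__ == "__main__":
    from transformers import pipeline

    summarizer = pipeline("summarization")  # a summarization pipeline like the class above
    print(summarizer("A very long article about text-to-text generation ...", max_length=60, min_length=5))
    translator = pipeline("translation_en_to_fr")  # a translation pipeline like the class above
    print(translator("How are you?", max_length=40))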
| 368
|
import os
def solution() -> str:
    """simple docstring"""
    snake_case_ = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(snake_case_ ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
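# Minimal self-contained sketch of the same idea (an addition; it does not need the external
# ``num.txt`` file): sum the integers and keep the first ten digits of the total.
def _first_ten_digits_of_sum(numbers):
    return str(sum(numbers))[:10]

assert _first_ten_digits_of_sum([10**50, 2 * 10**50, 3 * 10**50]) == "6000000000"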
| 267
| 0
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> List[str]:
_A = """bilinear"""
_A = max_size
_A = short_edge_length
def __call__( self , lowerCAmelCase_ ) -> Dict:
_A = []
for img in imgs:
_A , _A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ )
if h < w:
_A , _A = size, scale * w
else:
_A , _A = scale * h, size
if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
            if img.dtype == np.uint8:
_A = Image.fromarray(lowerCAmelCase_ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase_ )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 )
img_augs.append(lowerCAmelCase_ )
return img_augs
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> Optional[int]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
with torch.no_grad():
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase_ ) == 1
for i in range(len(lowerCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase_ ) for x in images]
# now pad them to do the following operations
_A , _A = self.pad(lowerCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Dict) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Tuple[int, int]) -> int:
assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!"
_A , _A = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case__)
tensor[:, 1].clamp_(min=0 , max=snake_case__)
tensor[:, 2].clamp_(min=0 , max=snake_case__)
tensor[:, 3].clamp_(min=0 , max=snake_case__)
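# Usage sketch for the two box helpers above (an addition). Assumption: ``boxes`` is an
# (N, 4) tensor in (x1, y1, x2, y2) order and ``scale_yx`` holds per-box (scale_y, scale_x)
# factors, matching the ``scales_yx`` values returned by the preprocessing class above.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 700.0, 900.0]])
    scale_yx = torch.tensor([[0.5, 0.5]])
    boxes[:, 0::2] *= scale_yx[:, 1]  # rescale the x coordinates
    boxes[:, 1::2] *= scale_yx[:, 0]  # rescale the y coordinates
    boxes[:, 0].clamp_(min=0, max=640)  # clip x1 to a 640x480 image
    boxes[:, 2].clamp_(min=0, max=640)  # clip x2
    boxes[:, 1].clamp_(min=0, max=480)  # clip y1
    boxes[:, 3].clamp_(min=0, max=480)  # clip y2
    print(boxes)  # tensor([[  5.,  10., 350., 450.]])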
| 180
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 32
def snake_case ( snake_case__ :Optional[int]) -> str:
return int(x / 2**20)
class a :
"""simple docstring"""
def __enter__( self ) -> List[str]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_A = torch.cuda.memory_allocated()
return self
def __exit__( self , *lowerCAmelCase_ ) -> Optional[int]:
gc.collect()
torch.cuda.empty_cache()
_A = torch.cuda.memory_allocated()
_A = torch.cuda.max_memory_allocated()
_A = bamb(self.end - self.begin )
_A = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def snake_case ( snake_case__ :Accelerator , snake_case__ :int = 16 , snake_case__ :str = "bert-base-cased" , snake_case__ :int = 320 , snake_case__ :int = 160 , ) -> Dict:
_A = AutoTokenizer.from_pretrained(snake_case__)
_A = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F'''train[:{n_train}]''', """validation""": F'''validation[:{n_val}]'''})
def tokenize_function(snake_case__ :Optional[int]):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(snake_case__ :List[str]):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""")
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__)
_A = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__)
return train_dataloader, eval_dataloader
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[int]) -> Optional[int]:
# Initialize accelerator
_A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config["""lr"""]
_A = int(config["""num_epochs"""])
_A = int(config["""seed"""])
_A = int(config["""batch_size"""])
_A = args.model_name_or_path
set_seed(snake_case__)
_A , _A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__)
# Instantiate optimizer
_A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_A = optimizer_cls(params=model.parameters() , lr=snake_case__)
if accelerator.state.deepspeed_plugin is not None:
_A = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_A = 1
_A = (len(snake_case__) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
_A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A , _A , _A , _A , _A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
# We need to keep track of how many total steps we have iterated over
_A = 0
    # We also need to keep track of the starting epoch so files are named properly
_A = 0
# Now we train the model
_A = {}
for epoch in range(snake_case__ , snake_case__):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(snake_case__):
_A = model(**snake_case__)
_A = outputs.loss
_A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin)))
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used))
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked))
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin)))
_A = tracemalloc.peaked + bamb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""") , """w""") as f:
json.dump(snake_case__ , snake_case__)
def snake_case ( ) -> Optional[int]:
_A = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=snake_case__ , default=snake_case__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=snake_case__ , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=snake_case__ , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=1 , help="""Number of train epochs.""" , )
_A = parser.parse_args()
_A = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__)
if __name__ == "__main__":
main()
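# Measurement sketch (an addition): the bookkeeping pattern that the context manager above
# relies on, shown as a small helper. Assumption: a CUDA device is available; without one
# the torch.cuda calls below raise.
def _peak_cuda_mb_of(fn):
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
    begin = torch.cuda.memory_allocated()
    fn()
    return int((torch.cuda.max_memory_allocated() - begin) / 2**20)  # bytes -> MB
# e.g. _peak_cuda_mb_of(lambda: torch.randn(4096, 4096, device="cuda"))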
| 180
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def A ( _lowercase ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_lowercase )
SCREAMING_SNAKE_CASE : int = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_lowercase )
EnvironmentCommand.register_subcommand(_lowercase )
TestCommand.register_subcommand(_lowercase )
RunBeamCommand.register_subcommand(_lowercase )
DummyDataCommand.register_subcommand(_lowercase )
# Parse args
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = parser.parse_known_args()
if not hasattr(_lowercase , '''func''' ):
parser.print_help()
exit(1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = parse_unknown_args(_lowercase )
# Run
SCREAMING_SNAKE_CASE : Optional[Any] = args.func(_lowercase , **_lowercase )
service.run()
if __name__ == "__main__":
main()
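# Behaviour sketch (an addition) for the unknown-argument helper defined at the top of this
# file and called as ``parse_unknown_args`` above. Assumption: unknown CLI arguments always
# come in "--flag value" pairs, which the pairwise zip turns into a plain dict.
def _pair_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}

assert _pair_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp"]) == {"num_proc": "4", "cache_dir": "/tmp"}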
| 258
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__UpperCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowercase__ ( datasets.BuilderConfig):
UpperCamelCase_ = None
def A ( _lowercase , _lowercase , ):
import pyspark
def generate_fn():
SCREAMING_SNAKE_CASE : str = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
SCREAMING_SNAKE_CASE : Any = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' )
SCREAMING_SNAKE_CASE : Tuple = partition_df.collect()
SCREAMING_SNAKE_CASE : str = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowercase__ ( _BaseExamplesIterable):
def __init__( self : Optional[Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : Union[str, Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = df
SCREAMING_SNAKE_CASE : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
SCREAMING_SNAKE_CASE : Dict = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def __A ( self : Tuple , UpperCamelCase__ : np.random.Generator ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.split_shard_indices_by_worker(UpperCamelCase__ , UpperCamelCase__ )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ )
@property
def __A ( self : Tuple ):
'''simple docstring'''
return len(self.partition_order )
class lowercase__ ( datasets.DatasetBuilder):
UpperCamelCase_ = SparkConfig
def __init__( self : Union[str, Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : str = pyspark.sql.SparkSession.builder.getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = df
SCREAMING_SNAKE_CASE : Tuple = working_dir
super().__init__(
cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , )
def __A ( self : Tuple ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCamelCase__ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase__ )
            SCREAMING_SNAKE_CASE : Any = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase__ , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE : Dict = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def __A ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : str , UpperCamelCase__ : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def __A ( self : int , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(UpperCamelCase__ : Tuple ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
SCREAMING_SNAKE_CASE : int = self.df.count()
SCREAMING_SNAKE_CASE : Tuple = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.df.limit(UpperCamelCase__ )
.repartition(1 )
.mapInArrow(UpperCamelCase__ , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE : Optional[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE : List[str] = min(UpperCamelCase__ , int(approx_total_size / max_shard_size ) )
SCREAMING_SNAKE_CASE : Optional[int] = self.df.repartition(UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , ):
'''simple docstring'''
import pyspark
SCREAMING_SNAKE_CASE : int = ParquetWriter if file_format == '''parquet''' else ArrowWriter
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase__ ) ) if self._working_dir else fpath
SCREAMING_SNAKE_CASE : Optional[int] = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
SCREAMING_SNAKE_CASE : str = self.config.features
SCREAMING_SNAKE_CASE : Optional[int] = self._writer_batch_size
SCREAMING_SNAKE_CASE : Optional[int] = self._fs.storage_options
def write_arrow(UpperCamelCase__ : Optional[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
SCREAMING_SNAKE_CASE : int = pyspark.TaskContext().taskAttemptId()
SCREAMING_SNAKE_CASE : str = next(UpperCamelCase__ , UpperCamelCase__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = writer_class(
features=UpperCamelCase__ , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Tuple = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
SCREAMING_SNAKE_CASE : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[str] = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase__ )
if writer._num_bytes > 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : int = os.path.join(os.path.dirname(UpperCamelCase__ ) , os.path.basename(UpperCamelCase__ ) )
shutil.move(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.df.mapInArrow(UpperCamelCase__ , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __A ( self : Dict , UpperCamelCase__ : "datasets.SplitGenerator" , UpperCamelCase__ : str = "arrow" , UpperCamelCase__ : Optional[Union[str, int]] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
self._validate_cache_dir()
SCREAMING_SNAKE_CASE : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = not is_remote_filesystem(self._fs )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join if is_local else posixpath.join
SCREAMING_SNAKE_CASE : List[Any] = '''-TTTTT-SSSSS-of-NNNNN'''
SCREAMING_SNAKE_CASE : List[str] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
SCREAMING_SNAKE_CASE : Dict = path_join(self._output_dir , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Dict = []
for task_id, content in self._prepare_split_single(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
SCREAMING_SNAKE_CASE : Optional[Any] = total_num_examples
SCREAMING_SNAKE_CASE : Dict = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
SCREAMING_SNAKE_CASE : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
SCREAMING_SNAKE_CASE : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , ):
rename(
UpperCamelCase__ , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase__ , len(UpperCamelCase__ ) ).map(lambda UpperCamelCase__ : _rename_shard(*UpperCamelCase__ ) ).collect()
else:
# don't use any pattern
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(UpperCamelCase__ , '''''' ) , )
def __A ( self : int , UpperCamelCase__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 258
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Tuple , *a__ : Union[str, Any] , a__ : Tuple=None , a__ : Optional[int]=None , **a__ : int ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
__snake_case = eval_examples
__snake_case = post_process_function
def a (self : int , a__ : int=None , a__ : Union[str, Any]=None , a__ : Optional[Any]=None , a__ : str = "eval" ):
"""simple docstring"""
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(a__ )
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case = time.time()
try:
__snake_case = eval_loop(
a__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
__snake_case = self.post_process_function(a__ , a__ , output.predictions )
__snake_case = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__snake_case = metrics.pop(a__ )
metrics.update(output.metrics )
else:
__snake_case = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(a__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , a__ )
return metrics
def a (self : str , a__ : List[Any] , a__ : int , a__ : List[str]=None , a__ : str = "test" ):
"""simple docstring"""
__snake_case = self.get_test_dataloader(a__ )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case = time.time()
try:
__snake_case = eval_loop(
a__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(a__ , a__ , output.predictions , '''predict''' )
__snake_case = self.compute_metrics(a__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__snake_case = metrics.pop(a__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a__ )
| 24
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : str =BigBirdTokenizer
a : Union[str, Any] =BigBirdTokenizerFast
a : Tuple =True
a : Any =True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase : str = self.tokenizer_class(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "<s>"
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(snake_case__ ) , 1_004 )
def lowercase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Tuple = self.get_tokenizer()
lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé."
lowerCAmelCase : Optional[int] = tokenizer.tokenize(snake_case__ )
lowerCAmelCase : int = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase : int = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ )
lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = BigBirdTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "Hello World!"
lowerCAmelCase : Any = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowerCAmelCase : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase : int = " ".join(snake_case__ )
lowerCAmelCase : Dict = self.big_tokenizer.encode_plus(snake_case__ , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : str = BigBirdConfig(attention_type="original_full" )
lowerCAmelCase : Any = BigBirdModel(snake_case__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**snake_case__ )
model(**snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowerCAmelCase : Union[str, Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 108
| 0
|
def a_ ( hex_num: str ) -> int:
    """simple docstring"""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('''No value was passed to the function''' )
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('''Invalid value was passed to the function''' )
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
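# Equivalent sketch using only built-ins (an addition): convert via int(..., 16) and bin(),
# returning the binary digits as an int, negated when the hexadecimal value is negative.
def _hex_to_bin_builtin(hex_num: str) -> int:
    value = int(hex_num.strip(), 16)  # raises ValueError on malformed input
    bits = int(bin(abs(value))[2:])
    return -bits if value < 0 else bits

assert _hex_to_bin_builtin("AC") == 10101100
assert _hex_to_bin_builtin("-0xF") == -1111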
| 350
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6
| 0
|
import math
import os
import sys
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ''''''
try:
with open(_a , '''rb''' ) as binary_file:
lowerCAmelCase__ : Optional[int] = binary_file.read()
for dat in data:
lowerCAmelCase__ : List[Any] = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _a , _a , _a , _a ):
"""simple docstring"""
lexicon.pop(_a )
lowerCAmelCase__ : str = last_match_id
    if math.log2(_a ).is_integer():
for curr_key in lexicon:
lowerCAmelCase__ : Union[str, Any] = '''0''' + lexicon[curr_key]
lowerCAmelCase__ : int = bin(_a )[2:]
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = {'''0''': '''0''', '''1''': '''1'''}
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = '''''', ''''''
lowerCAmelCase__ : List[Any] = len(_a )
for i in range(len(_a ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCAmelCase__ : List[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_a , _a , _a , _a )
index += 1
lowerCAmelCase__ : Optional[int] = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowerCAmelCase__ : Tuple = lexicon[curr_string]
result += last_match_id
return result
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = os.path.getsize(_a )
lowerCAmelCase__ : Tuple = bin(_a )[2:]
lowerCAmelCase__ : int = len(_a )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = 8
try:
with open(_a , '''wb''' ) as opened_file:
lowerCAmelCase__ : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(_a ) , _a )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_a , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = read_file_binary(_a )
lowerCAmelCase__ : Dict = compress_data(_a )
lowerCAmelCase__ : Tuple = add_file_length(_a , _a )
write_file_binary(_a , _a )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
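# Minimal sketch of the first stage above (an addition), mirroring read_file_binary: every
# byte of the input becomes its zero-padded 8-bit representation, and all bytes are
# concatenated into a single bit string.
def _bytes_to_bit_string(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)

assert _bytes_to_bit_string(b"\x01\xff") == "00000001" + "11111111"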
| 131
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase = logging.get_logger(__name__)
class _a ( _lowercase):
_a : Optional[Any] = ['''pixel_values''']
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **_SCREAMING_SNAKE_CASE : int , )-> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 256}
lowerCAmelCase__ : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCAmelCase__ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Optional[Any] = size
lowerCAmelCase__ : Any = resample
lowerCAmelCase__ : str = do_center_crop
lowerCAmelCase__ : Dict = crop_size
lowerCAmelCase__ : str = do_rescale
lowerCAmelCase__ : List[str] = rescale_factor
lowerCAmelCase__ : int = do_normalize
lowerCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Dict , )-> np.ndarray:
lowerCAmelCase__ : str = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase__ : List[str] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> np.ndarray:
lowerCAmelCase__ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Optional[int] )-> np.ndarray:
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : str , )-> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : ImageInput , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[float] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE : Tuple , )-> Optional[Any]:
lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : List[str] = size if size is not None else self.size
lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = resample if resample is not None else self.resample
lowerCAmelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowerCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : Optional[int] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ : List[Any] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowerCAmelCase__ : Dict = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Dict = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowerCAmelCase__ : List[Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowerCAmelCase__ : Tuple = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase__ : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Tuple] = None )-> List[Any]:
lowerCAmelCase__ : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Tuple = target_sizes.numpy()
lowerCAmelCase__ : Tuple = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ : Any = logits.argmax(dim=1 )
lowerCAmelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 131
| 1
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
SCREAMING_SNAKE_CASE = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
SCREAMING_SNAKE_CASE = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
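
# Editor's illustration (hedged, not part of the original metric): check_gold_parse_annotation
# reports True as soon as a non-comment key line carries a parse bit (column 6) other than "-".
# The line
#   "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -"
# from the docstring example has "(TOP(S(VP*" in that column, so it counts as gold parse annotation.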
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        '''simple docstring'''
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
return score
| 230
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
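
# Hedged usage note (editor's addition): this module is normally reached through the `accelerate`
# CLI, e.g. `accelerate env` or `accelerate env --config_file path/to/config.yaml`; running the
# file directly falls back to the standalone argparse parser built in env_command_parser() above.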
| 230
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """do_thumbnail""" ) )
self.assertTrue(hasattr(A , """do_align_long_axis""" ) )
self.assertTrue(hasattr(A , """do_pad""" ) )
self.assertTrue(hasattr(A , """do_normalize""" ) )
self.assertTrue(hasattr(A , """image_mean""" ) )
self.assertTrue(hasattr(A , """image_std""" ) )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
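        # Editor's illustration (grounded in the two lines above): a tuple size=(42, 84) is read as
        # (width, height) for backward compatibility, hence the resulting {"height": 84, "width": 42}.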
def snake_case_( self ) -> Optional[int]:
pass
@is_flaky()
def snake_case_( self ) -> Optional[int]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def snake_case_( self ) -> List[Any]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def snake_case_( self ) -> Any:
# Initialize image_processing
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 58
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
        eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
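
# Hedged usage sketch (editor's addition, based on the fallbacks wired up in __init__ above):
#
#   config = RwkvConfig(hidden_size=1024)
#   config.attention_hidden_size   # -> 1024 (defaults to hidden_size)
#   config.intermediate_size       # -> 4096 (defaults to 4 * hidden_size)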
| 268
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
"""simple docstring"""
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
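        # Editor's worked example (hedged, illustrative numbers only): with approx_bytes_per_row = 1_000,
        # df_num_rows = 10_000_000 and max_shard_size = 500 << 20 (500 MiB), approx_total_size is ~10 GB,
        # so the dataframe would be repartitioned into min(10_000_000, int(1e10 / 524_288_000)) = 19
        # partitions, i.e. roughly one output shard per ~526 MB slice of data.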
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
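
# Hedged usage sketch (editor's addition): this builder is normally driven through
# `datasets.Dataset.from_spark` rather than instantiated directly. Treat the exact call below as
# an assumption about the public API, not as part of this file:
#
#   from pyspark.sql import SparkSession
#   import datasets
#
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = datasets.Dataset.from_spark(df)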
| 336
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10,
        max_length=1024, num_mel_bins=128, **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 18
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26,
        num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None,
        cross_attention_shape_for_attention="kv", self_attention_widening_factor=1,
        cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1,
        initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262,
        max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16,
        audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
A_ : Any = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
A_ : int = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
A_ : Optional[int] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a (datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
}
| 365
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
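
# Editor's note (hedged, illustrative only; the file and class names below are hypothetical): each
# line of --correct_filename is expected to hold four ";"-separated fields -- file, class name,
# test name, and the replacement line, e.g.
#
#   tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference;expected_slice = torch.tensor([...])
#
# while --fail_filename (optional) lists failing tests as "file::class::test" strings, matching the
# "::".join(...) lookup performed in main() above.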
| 141
| 0
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__A : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : str=16 , lowerCamelCase : Optional[Any]=13 , lowerCamelCase : List[str]=7 , lowerCamelCase : List[str]=14 , lowerCamelCase : Dict=10 , lowerCamelCase : List[Any]=19 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : List[str]=4 , lowerCamelCase : List[str]=True , lowerCamelCase : int=16 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=4 , lowerCamelCase : List[str]=4 , lowerCamelCase : Any="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Any=0.1 , lowerCamelCase : Union[str, Any]=[1, 2, 3, 4, 5] , lowerCamelCase : Dict=25 , lowerCamelCase : Optional[int]=5 , ) -> List[Any]:
lowerCAmelCase_ : Tuple = d_model
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Tuple = batch_size
lowerCAmelCase_ : Union[str, Any] = prediction_length
lowerCAmelCase_ : Union[str, Any] = context_length
lowerCAmelCase_ : List[Any] = cardinality
lowerCAmelCase_ : Optional[int] = num_time_features
lowerCAmelCase_ : Dict = lags_sequence
lowerCAmelCase_ : Any = embedding_dimension
lowerCAmelCase_ : Union[str, Any] = is_training
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : List[str] = num_attention_heads
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = context_length
lowerCAmelCase_ : Any = prediction_length + label_length
lowerCAmelCase_ : int = label_length
lowerCAmelCase_ : Any = moving_average
lowerCAmelCase_ : Optional[Any] = autocorrelation_factor
def __lowercase ( self : Optional[int] ) -> str:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __lowercase ( self : Dict , lowerCamelCase : Optional[int] ) -> List[Any]:
lowerCAmelCase_ : Tuple = config.context_length + max(config.lags_sequence )
lowerCAmelCase_ : str = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCAmelCase_ : str = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCAmelCase_ : int = floats_tensor([self.batch_size, _past_length] )
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCAmelCase_ : Dict = floats_tensor([self.batch_size, config.prediction_length] )
lowerCAmelCase_ : Any = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __lowercase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = self.get_config()
lowerCAmelCase_ : Any = self.prepare_autoformer_inputs_dict(SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def __lowercase ( self : Dict ) -> Optional[int]:
lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> int:
lowerCAmelCase_ : Dict = AutoformerModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).eval()
lowerCAmelCase_ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Dict = outputs.encoder_last_hidden_state
lowerCAmelCase_ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ : Tuple = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : str = AutoformerEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : int = model.create_network_inputs(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : int = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCAmelCase_ : Dict = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCAmelCase_ : List[str] = encoder(inputs_embeds=SCREAMING_SNAKE_CASE__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCAmelCase_ : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCAmelCase_ : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCAmelCase_ : List[str] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCAmelCase_ : Tuple = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ : int = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Union[str, Any] = AutoformerDecoder.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = decoder(
trend=SCREAMING_SNAKE_CASE__ , inputs_embeds=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowercase = (AutoformerForPrediction,) if is_torch_available() else ()
lowercase = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : str = AutoformerModelTester(self )
lowerCAmelCase_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __lowercase ( self : Union[str, Any] ) -> List[str]:
self.config_tester.run_common_tests()
def __lowercase ( self : Any ) -> List[Any]:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertEqual(info["""missing_keys"""] , [] )
def __lowercase ( self : List[str] ) -> Tuple:
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def __lowercase ( self : Dict ) -> List[str]:
pass
def __lowercase ( self : List[Any] ) -> Tuple:
lowerCAmelCase_ : Tuple = inspect.signature(getattr(SCREAMING_SNAKE_CASE__ , """forward""" ) )
# The main input is the name of the argument after `self`
lowerCAmelCase_ : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , SCREAMING_SNAKE_CASE__ )
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE__ )] , SCREAMING_SNAKE_CASE__ )
def __lowercase ( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Dict = getattr(self.model_tester , """seq_length""" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = getattr(self.model_tester , """decoder_seq_length""" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[str] = getattr(self.model_tester , """d_model""" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = getattr(self.model_tester , """num_attention_heads""" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : str = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase_ : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase_ : Any = outputs.encoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCAmelCase_ : str = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : int = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# decoder attentions
lowerCAmelCase_ : Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCAmelCase_ : Any = outputs.cross_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 2 , len(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __lowercase ( self : Dict ) -> List[str]:
super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename : str = "train-batch.pt" ):
    '''Download a cached batch of the tourism-monthly dataset from the Hub and load it with torch.'''
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> int:
lowerCAmelCase_ : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[int] = prepare_batch()
with torch.no_grad():
lowerCAmelCase_ : Any = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
lowerCAmelCase_ : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowercase ( self : Dict ) -> Optional[int]:
lowerCAmelCase_ : str = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowerCAmelCase_ : Optional[int] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
lowerCAmelCase_ : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __lowercase ( self : str ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Tuple = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowerCAmelCase_ : Tuple = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
lowerCAmelCase_ : Any = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Any = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase_ : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , SCREAMING_SNAKE_CASE__ , rtol=1E-1 ) )
| 120
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self ):
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) -> None:
    """Convert a PyTorch Lightning Longformer QA checkpoint into a `LongformerForQuestionAnswering` checkpoint."""
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('''cpu''' ) )
    lightning_model.load_state_dict(ckpt['''state_dict'''] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
    help='''Path to the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 337
| 0
|
from __future__ import annotations
def rec_insertion_sort( collection : list , n : int ) -> None:
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection : list , index : int ) -> None:
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
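# Worked example (illustrative): entering "4 2 9 1" above yields number_list == [4, 2, 9, 1],
# rec_insertion_sort then sorts it in place, and the script prints [1, 2, 4, 9].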
| 113
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
@property
def lowerCAmelCase ( self : Union[str, Any]):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = ort.SessionOptions()
__lowerCamelCase : Tuple = False
return options
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
__lowerCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
# using the PNDM scheduler by default
__lowerCamelCase : Dict = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = 'A red cat sitting on a park bench'
__lowerCamelCase : Any = np.random.RandomState(0)
__lowerCamelCase : List[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ ,image=SCREAMING_SNAKE_CASE__ ,mask_image=SCREAMING_SNAKE_CASE__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=1_5 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np' ,)
__lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 113
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
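# A minimal sketch of the LocalSGD usage pattern described above, assuming an already
# prepared `accelerator`, `model`, `optimizer` and `train_dataloader` (illustrative names,
# not defined at this point in the script; the full, runnable version lives in
# `training_function` below):
#
#     with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#         for batch in train_dataloader:
#             outputs = model(**batch)
#             accelerator.backward(outputs.loss)
#             optimizer.step()
#             optimizer.zero_grad()
#             local_sgd.step()  # parameters are synchronized every `local_sgd_steps` steps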
_a = 1_6
_a = 3_2
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Any = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE : Any ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase: List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase: Any = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase: Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase: int = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase: Optional[int] = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase: List[Any] = 8
else:
__lowerCAmelCase: Dict = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding='longest' , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCAmelCase: Union[str, Any] = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE ) == "1":
__lowerCAmelCase: List[Any] = 2
# New Code #
__lowerCAmelCase: Union[str, Any] = int(args.gradient_accumulation_steps )
__lowerCAmelCase: List[str] = int(args.local_sgd_steps )
# Initialize accelerator
__lowerCAmelCase: Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase: Any = config["""lr"""]
__lowerCAmelCase: List[str] = int(config['num_epochs'] )
__lowerCAmelCase: Any = int(config['seed'] )
__lowerCAmelCase: Dict = int(config['batch_size'] )
__lowerCAmelCase: Union[str, Any] = evaluate.load('glue' , 'mrpc' )
set_seed(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase: int = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase: List[Any] = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase: Optional[Any] = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
__lowerCAmelCase: int = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase: List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , local_sgd_steps=SCREAMING_SNAKE_CASE , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: str = model(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase: Optional[Any] = model(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase: List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE )
def main():
"""simple docstring"""
__lowerCAmelCase: Optional[int] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=SCREAMING_SNAKE_CASE , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowerCAmelCase: Tuple = parser.parse_args()
__lowerCAmelCase: str = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 322
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 41
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( __snake_case,__snake_case,unittest.TestCase ):
_a = StableDiffusionSAGPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = False
def a__ ( self ) -> List[str]:
torch.manual_seed(0 )
_A : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_A : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_A : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_A : Any = CLIPTextModel(_a )
_A : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self , _a , _a=0 ) -> Tuple:
if str(_a ).startswith("""mps""" ):
_A : List[Any] = torch.manual_seed(_a )
else:
_A : Optional[int] = torch.Generator(device=_a ).manual_seed(_a )
_A : List[Any] = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Dict:
_A : int = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
_A : Union[str, Any] = sag_pipe.to(_a )
sag_pipe.set_progress_bar_config(disable=_a )
_A : int = """."""
_A : Optional[Any] = torch.manual_seed(0 )
_A : List[Any] = sag_pipe(
[prompt] , generator=_a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
_A : List[str] = output.images
_A : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_A : Dict = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def a__ ( self ) -> Any:
_A : Any = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_A : Tuple = sag_pipe.to(_a )
sag_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """."""
_A : Tuple = torch.manual_seed(0 )
_A : Optional[int] = sag_pipe(
[prompt] , generator=_a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
_A : int = output.images
_A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_A : Optional[int] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def a__ ( self ) -> List[Any]:
_A : str = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
_A : int = sag_pipe.to(_a )
sag_pipe.set_progress_bar_config(disable=_a )
_A : Dict = """."""
_A : Union[str, Any] = torch.manual_seed(0 )
_A : List[Any] = sag_pipe(
[prompt] , width=768 , height=512 , generator=_a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
_A : Dict = output.images
assert image.shape == (1, 512, 768, 3)
| 362
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_snake_case = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_snake_case = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
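# Hedged usage sketch of the calling convention documented above (illustrative only: the
# `tokenizer` variable and the checkpoint name are assumptions, not taken from this module):
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     # encoded_inputs["input_ids"] has shape (n_passages, sequence_length)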
@add_start_docstrings(UpperCamelCase__ )
class lowercase :
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ["input_ids", "attention_mask"]
| 343
| 0
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case_ ( __SCREAMING_SNAKE_CASE : bytes , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = F'''{sampling_rate}'''
lowercase_ : Union[str, Any] = '''1'''
lowercase_ : Optional[Any] = '''f32le'''
lowercase_ : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase_ : List[str] = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
lowercase_ : int = output_stream[0]
lowercase_ : Dict = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : str = "f32le" , ):
"""simple docstring"""
lowercase_ : str = F'''{sampling_rate}'''
lowercase_ : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
lowercase_ : Any = 2
elif format_for_conversion == "f32le":
lowercase_ : Optional[int] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase_ : List[Any] = platform.system()
if system == "Linux":
lowercase_ : Optional[int] = '''alsa'''
lowercase_ : Dict = '''default'''
elif system == "Darwin":
lowercase_ : int = '''avfoundation'''
lowercase_ : str = ''':0'''
elif system == "Windows":
lowercase_ : int = '''dshow'''
lowercase_ : Optional[Any] = '''default'''
lowercase_ : Dict = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
lowercase_ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase_ : int = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[Tuple[float, float], float]] = None , __SCREAMING_SNAKE_CASE : str = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
lowercase_ : List[str] = stream_chunk_s
else:
lowercase_ : Optional[int] = chunk_length_s
lowercase_ : List[Any] = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
lowercase_ : Tuple = np.intaa
lowercase_ : Dict = 2
elif format_for_conversion == "f32le":
lowercase_ : List[Any] = np.floataa
lowercase_ : List[Any] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase_ : List[str] = chunk_length_s / 6
lowercase_ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
lowercase_ : Tuple = [stride_length_s, stride_length_s]
lowercase_ : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase_ : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase_ : Tuple = datetime.datetime.now()
lowercase_ : Dict = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
lowercase_ : int = np.frombuffer(item['''raw'''] , dtype=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
lowercase_ : Optional[Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple[int, int] , __SCREAMING_SNAKE_CASE : bool = False ):
"""simple docstring"""
lowercase_ : Optional[int] = B''''''
lowercase_ , lowercase_ : Dict = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase_ : Any = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
lowercase_ : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
lowercase_ : Tuple = (_stride_left, stride_right)
lowercase_ : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
lowercase_ : int = False
yield item
lowercase_ : Optional[Any] = stride_left
lowercase_ : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
lowercase_ : Union[str, Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
lowercase_ : Union[str, Any] = False
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : str = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowercase_ : Any = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 93
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a__ ( a__ , a__ , a__ , a__ , a__=False , a__=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
__SCREAMING_SNAKE_CASE = config_class.from_json_file(a__ )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
print(F'Building TensorFlow model from configuration: {config}' )
__SCREAMING_SNAKE_CASE = model_class(a__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__SCREAMING_SNAKE_CASE = cached_file(
a__ , a__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__SCREAMING_SNAKE_CASE = load_pytorch_checkpoint_in_tfa_model(a__ , a__ )
if compare_with_pt_model:
__SCREAMING_SNAKE_CASE = tf_model(tf_model.dummy_inputs , training=a__ ) # build the network
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a__ , config=a__ , state_dict=a__ )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**pt_model.dummy_inputs )
__SCREAMING_SNAKE_CASE = pto[0].numpy()
__SCREAMING_SNAKE_CASE = tfo[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(np_pt - np_tf ) )
print(F'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}'
# Save pytorch-model
print(F'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(a__ , save_format="""h5""" )
def a__ ( a__ , a__ , a__=None , a__=None , a__=False , a__=False , a__=False , a__=False , ):
"""simple docstring"""
if args_model_type is None:
__SCREAMING_SNAKE_CASE = list(MODEL_CLASSES.keys() )
else:
__SCREAMING_SNAKE_CASE = [args_model_type]
for j, model_type in enumerate(a__ , start=1 ):
print("""=""" * 1_00 )
print(F' Converting model type {j}/{len(a__ )}: {model_type}' )
print("""=""" * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__SCREAMING_SNAKE_CASE = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a__ , a__ ) , start=1 ):
print("""-""" * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
__SCREAMING_SNAKE_CASE = model_shortcut_name
elif only_convert_finetuned_models:
print(F' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
F' Converting checkpoint {i}/{len(a__ )}: {model_shortcut_name} - model_type {model_type}' )
print("""-""" * 1_00 )
if config_shortcut_name in aws_config_map:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__SCREAMING_SNAKE_CASE = cached_file(a__ , a__ , force_download=not use_cached_models )
else:
__SCREAMING_SNAKE_CASE = model_shortcut_name
if os.path.isfile(a__ ):
__SCREAMING_SNAKE_CASE = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=a__ , pytorch_checkpoint_path=a__ , config_file=a__ , tf_dump_path=os.path.join(a__ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=a__ , )
if remove_cached_files:
os.remove(a__ )
os.remove(a__ )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
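# Hedged usage note (added): this conversion script is normally driven from the
# command line through the argparse options above. The file name and paths below
# are placeholders/assumptions, not values taken from the original:
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./pt_checkpoint.bin \
#       --config_file ./config.json \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model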
| 267
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    """Checks that `get_activation` returns the expected torch modules."""

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
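# Hedged usage sketch (added, not part of the original tests): a minimal direct use
# of `get_activation`; the input values below are illustrative assumptions.
def _activation_demo() -> None:
    act = get_activation("gelu")
    x = torch.tensor([-1.0, 0.0, 1.0])
    print(act(x))  # roughly tensor([-0.1587, 0.0000, 0.8413])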
| 362
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_A = """__DUMMY_TRANSFORMERS_USER__"""
_A = """Dummy User"""
_A = """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"""
_A = """https://hub-ci.huggingface.co"""
_A = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
_A = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
_A = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
HfFolder.save_token(lowerCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def a__ ( ) -> List[str]:
return HfApi(endpoint=lowerCAmelCase )
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : List[str] = HfFolder.get_token()
HfFolder.save_token(lowerCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[str]:
def _cleanup_repo(lowerCAmelCase ):
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def a__ ( lowerCAmelCase ) -> Optional[Any]:
@contextmanager
def _temporary_repo(lowerCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(lowerCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
UpperCAmelCase__ : str = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
UpperCAmelCase__ : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : Any = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Union[str, Any] = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
UpperCAmelCase__ : Optional[int] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 166
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """BlipImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = False
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = self.image_processor
def __call__( self : Union[str, Any] , UpperCamelCase__ : ImageInput = None , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : int , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCamelCase = self.tokenizer
UpperCamelCase = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
return text_encoding
# add pixel_values
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
if text is not None:
UpperCamelCase = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
else:
UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase__ )
return encoding_image_processor
def A ( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Any ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Union[str, Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
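# Hedged usage sketch (added): the class above corresponds to transformers'
# BlipProcessor; the checkpoint id and prompt below are assumptions for illustration.
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")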
| 28
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, as reals when the roots are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
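# Hedged examples (added, not in the original module): quick sanity checks of
# `quadratic_roots`; the polynomials are illustrative.
def _quadratic_examples() -> None:
    # (x - 1)(x - 2) = x**2 - 3x + 2, so the roots are 2.0 and 1.0
    print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)
    # x**2 + 1 = 0 has no real roots, so complex roots are returned
    print(quadratic_roots(a=1, b=0, c=1))  # (1j, -1j)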
| 150
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of a contiguous subarray of `arr` (Kadane's algorithm)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
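# Hedged examples (added, not in the original module): on an all-negative input the
# result depends on `allow_empty_subarrays`; the arrays are illustrative.
def _max_subarray_examples() -> None:
    assert max_subarray_sum([-3, -1, -2]) == -1  # best non-empty subarray is [-1]
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # empty subarray allowed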
| 248
|
"""simple docstring"""
def solution():
    """Return a*b*c for the Pythagorean triplet (a, b, c) with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
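# Hedged check (added, not in the original module): the triplet found above is
# (200, 375, 425), since 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000.
def _verify_triplet() -> None:
    a, b = 200, 375
    c = 1000 - a - b
    assert a * a + b * b == c * c
    assert solution() == a * b * c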
| 248
| 1
|
'''simple docstring'''
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'{solution() = }')
| 67
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
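# Hedged examples (added, not in the original module): the inputs are illustrative.
def _bracket_examples() -> None:
    assert is_balanced("{[()]}")
    assert not is_balanced("([)]")  # interleaved brackets are not balanced
    assert not is_balanced("((")    # unclosed brackets are not balanced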
| 6
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : List[Any] = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Tuple = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = nn.Parameter(torch.zeros(1 , snake_case_ ) )
UpperCAmelCase_ : str = nn.Parameter(torch.ones(1 , snake_case_ ) )
def _UpperCamelCase ( self , snake_case_ = None , snake_case_ = None , ):
'''simple docstring'''
UpperCAmelCase_ : int = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
UpperCAmelCase_ : Tuple = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (embeds * self.std) + self.mean
return embeds
| 274
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
lowercase_ = 0
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
lowercase_ = tuple[int, int]
class Node:
    """A single search node on the grid."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level `grid`."""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Runs a forward and a backward AStar search until the two frontiers meet."""

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
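    # Hedged note (added): HEURISTIC above selects Manhattan distance (1) or
    # Euclidean distance (0). For the default 7x7 grid, the start-node estimate to
    # the goal at (6, 6) is 6 + 6 = 12 under Manhattan and sqrt(72) ≈ 8.49 under
    # Euclidean; on a 4-connected grid Manhattan is the tighter admissible bound.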
| 45
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 0
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A__ ( __snake_case ):
_UpperCAmelCase :List[str] = 'char'
_UpperCAmelCase :List[str] = 'bpe'
_UpperCAmelCase :List[Any] = 'wp'
__lowerCamelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A__ ( __snake_case ):
_UpperCAmelCase :Any = ['image_processor', 'char_tokenizer']
_UpperCAmelCase :Optional[Any] = 'ViTImageProcessor'
_UpperCAmelCase :List[Any] = 'MgpstrTokenizer'
def __init__( self , A_=None , A_=None , **A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , A_ , )
UpperCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
UpperCamelCase : Dict = tokenizer
UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("gpt2" )
UpperCamelCase : int = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(A_ , A_ )
def __call__( self , A_=None , A_=None , A_=None , **A_ ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
UpperCamelCase : Tuple = self.image_processor(A_ , return_tensors=A_ , **A_ )
if text is not None:
UpperCamelCase : Optional[Any] = self.char_tokenizer(A_ , return_tensors=A_ , **A_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase : Union[str, Any] = encodings["input_ids"]
return inputs
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = sequences
UpperCamelCase : Dict = char_preds.size(0 )
UpperCamelCase : List[Any] = self._decode_helper(A_ , "char" )
UpperCamelCase : List[str] = self._decode_helper(A_ , "bpe" )
UpperCamelCase : Optional[int] = self._decode_helper(A_ , "wp" )
UpperCamelCase : List[Any] = []
UpperCamelCase : int = []
for i in range(A_ ):
UpperCamelCase : Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCamelCase : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCamelCase : Tuple = scores.index(max(A_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCamelCase : List[Any] = {}
UpperCamelCase : Dict = final_strs
UpperCamelCase : List[str] = final_scores
UpperCamelCase : str = char_strs
UpperCamelCase : Union[str, Any] = bpe_strs
UpperCamelCase : Tuple = wp_strs
return out
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
UpperCamelCase : Tuple = self.char_decode
UpperCamelCase : str = 1
UpperCamelCase : List[Any] = "[s]"
elif format == DecodeType.BPE:
UpperCamelCase : Any = self.bpe_decode
UpperCamelCase : Any = 2
UpperCamelCase : Optional[int] = "#"
elif format == DecodeType.WORDPIECE:
UpperCamelCase : Tuple = self.wp_decode
UpperCamelCase : Any = 102
UpperCamelCase : Any = "[SEP]"
else:
raise ValueError(F"""Format {format} is not supported.""" )
UpperCamelCase : Dict = [], []
UpperCamelCase : Dict = pred_logits.size(0 )
UpperCamelCase : Dict = pred_logits.size(1 )
UpperCamelCase : List[Any] = pred_logits.topk(1 , dim=-1 , largest=A_ , sorted=A_ )
UpperCamelCase : Optional[Any] = preds_index.view(-1 , A_ )[:, 1:]
UpperCamelCase : Optional[int] = decoder(A_ )
UpperCamelCase : int = torch.nn.functional.softmax(A_ , dim=2 ).max(dim=2 )
UpperCamelCase : int = preds_max_prob[:, 1:]
for index in range(A_ ):
UpperCamelCase : Optional[Any] = preds_str[index].find(A_ )
UpperCamelCase : Dict = preds_str[index][:pred_eos]
UpperCamelCase : Union[str, Any] = preds_index[index].cpu().tolist()
UpperCamelCase : int = pred_index.index(A_ ) if eos_token in pred_index else -1
UpperCamelCase : List[Any] = preds_max_prob[index][: pred_eos_index + 1]
UpperCamelCase : Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A_ )
conf_scores.append(A_ )
return dec_strs, conf_scores
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Any = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(A_ )]
return decode_strs
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Any = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(A_ )]
return decode_strs
| 362
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if `input_str` contains every letter of the alphabet at least once."""
    frequency = set()

    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())

    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations above."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
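# Hedged examples (added, not in the original module): the sentences are illustrative.
def _pangram_examples() -> None:
    assert is_pangram()  # the default sentence covers every letter a-z
    assert not is_pangram("Hello world")
    assert is_pangram_fastest("Pack my box with five dozen liquor jugs")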
| 140
| 0
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
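# Hedged illustration (added): with the template asserted above, a call such as
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
# should resolve to
#   https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# because the path component is URL-quoted.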
| 72
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "bloom"
a__ = ["past_key_values"]
a__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : List[Any] , __lowerCamelCase : Optional[int]=25_08_80 , __lowerCamelCase : Any=64 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Optional[int]=8 , __lowerCamelCase : Any=1e-5 , __lowerCamelCase : int=0.02 , __lowerCamelCase : int=True , __lowerCamelCase : Any=1 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Any=False , **__lowerCamelCase : int , ) -> Optional[int]:
A : Optional[Any] = vocab_size
# Backward compatibility with n_embed kwarg
A : int = kwargs.pop("n_embed" , __lowerCamelCase )
A : int = hidden_size if n_embed is None else n_embed
A : int = n_layer
A : Tuple = n_head
A : Tuple = layer_norm_epsilon
A : int = initializer_range
A : List[Any] = use_cache
A : Tuple = pretraining_tp
A : int = apply_residual_connection_post_layernorm
A : str = hidden_dropout
A : Any = attention_dropout
A : Union[str, Any] = bos_token_id
A : str = eos_token_id
A : List[str] = slow_but_exact
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = version.parse("1.12" )
def __init__( self : Union[str, Any] , __lowerCamelCase : PretrainedConfig , __lowerCamelCase : str = "default" , __lowerCamelCase : List[PatchingSpec] = None , __lowerCamelCase : bool = False , ) -> Union[str, Any]:
super().__init__(__lowerCamelCase , task=__lowerCamelCase , patching_specs=__lowerCamelCase , use_past=__lowerCamelCase )
if not getattr(self._config , "pad_token_id" , __lowerCamelCase ):
# TODO: how to do that better?
A : Tuple = 0
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
A : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" , inverted_values_shape=__lowerCamelCase )
A : int = {0: "batch", 1: "past_sequence + sequence"}
else:
A : Any = {0: "batch", 1: "sequence"}
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
return self._config.n_head
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> float:
return 1e-3
def SCREAMING_SNAKE_CASE__ ( self : int , __lowerCamelCase : "PreTrainedTokenizer" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
A : Optional[Any] = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
# We need to order the input in the way they appears in the forward()
A : Optional[int] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A : str = seqlen + 2
A : Any = self._config.hidden_size // self.num_attention_heads
A : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
A : List[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
A : List[Any] = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
A : Union[str, Any] = common_inputs["attention_mask"]
if self.use_past:
A : str = ordered_inputs["attention_mask"].dtype
A : Tuple = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
return 13
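# Hedged usage sketch (added): the first class above corresponds to transformers'
# BloomConfig; the keyword values below are illustrative, not checkpoint defaults.
#   from transformers import BloomConfig, BloomModel
#   config = BloomConfig(hidden_size=64, n_layer=2, n_head=8)
#   model = BloomModel(config)  # a small, randomly initialised model for quick tests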
| 359
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 256
| 0
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _snake_case ( _snake_case : Any , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Any , _snake_case : List[Any] ):
with open(__lowercase ) as metadata_file:
lowerCAmelCase : Optional[int] = json.load(__lowercase )
lowerCAmelCase : Any = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
lowerCAmelCase : Union[str, Any] = torch.load(__lowercase , map_location='''cpu''' )['''module''']
# Load the entity vocab file
lowerCAmelCase : int = load_original_entity_vocab(__lowercase )
# add an entry for [MASK2]
lowerCAmelCase : List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCAmelCase : Any = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCAmelCase : List[str] = AddedToken('''<ent>''' , lstrip=__lowercase , rstrip=__lowercase )
lowerCAmelCase : List[Any] = AddedToken('''<ent2>''' , lstrip=__lowercase , rstrip=__lowercase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''r''' ) as f:
lowerCAmelCase : List[Any] = json.load(__lowercase )
lowerCAmelCase : int = '''MLukeTokenizer'''
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__lowercase , __lowercase )
lowerCAmelCase : Tuple = MLukeTokenizer.from_pretrained(__lowercase )
# Initialize the embeddings of the special tokens
lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
lowerCAmelCase : int = state_dict['''embeddings.word_embeddings.weight''']
lowerCAmelCase : Tuple = word_emb[ent_init_index].unsqueeze(0 )
lowerCAmelCase : List[Any] = word_emb[enta_init_index].unsqueeze(0 )
lowerCAmelCase : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCAmelCase : Tuple = state_dict[bias_name]
lowerCAmelCase : List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCAmelCase : List[str] = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCAmelCase : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCAmelCase : List[Any] = f'''encoder.layer.{layer_index}.attention.self.'''
lowerCAmelCase : List[str] = state_dict[prefix + matrix_name]
lowerCAmelCase : Any = state_dict[prefix + matrix_name]
lowerCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCAmelCase : Optional[Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
lowerCAmelCase : int = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowerCAmelCase : str = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCAmelCase : Optional[Any] = state_dict['''entity_predictions.bias''']
lowerCAmelCase : Dict = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
lowerCAmelCase : Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCAmelCase : int = LukeForMaskedLM(config=__lowercase ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
lowerCAmelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
lowerCAmelCase : Optional[int] = state_dict[key]
else:
lowerCAmelCase : Tuple = state_dict[key]
lowerCAmelCase, lowerCAmelCase : Optional[Any] = model.load_state_dict(__lowercase , strict=__lowercase )
if set(__lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCAmelCase : Tuple = MLukeTokenizer.from_pretrained(__lowercase , task='''entity_classification''' )
lowerCAmelCase : Union[str, Any] = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
lowerCAmelCase : List[Any] = (0, 9)
lowerCAmelCase : Tuple = tokenizer(__lowercase , entity_spans=[span] , return_tensors='''pt''' )
lowerCAmelCase : Union[str, Any] = model(**__lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase : Dict = torch.Size((1, 33, 768) )
lowerCAmelCase : Optional[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase : int = torch.Size((1, 1, 768) )
lowerCAmelCase : List[str] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCAmelCase : Optional[int] = MLukeTokenizer.from_pretrained(__lowercase )
lowerCAmelCase : List[str] = '''Tokyo is the capital of <mask>.'''
lowerCAmelCase : Dict = (24, 30)
lowerCAmelCase : Optional[Any] = tokenizer(__lowercase , entity_spans=[span] , return_tensors='''pt''' )
lowerCAmelCase : str = model(**__lowercase )
lowerCAmelCase : Tuple = encoding['''input_ids'''][0].tolist()
lowerCAmelCase : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
lowerCAmelCase : Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowercase )
lowerCAmelCase : Tuple = outputs.entity_logits[0][0].argmax().item()
lowerCAmelCase : List[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__lowercase ) )
model.save_pretrained(__lowercase )
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : int = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
lowerCAmelCase : Optional[int] = [json.loads(__lowercase ) for line in open(__lowercase )]
lowerCAmelCase : List[str] = {}
for entry in data:
lowerCAmelCase : Optional[int] = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowerCAmelCase : str = entity_id
break
lowerCAmelCase : List[str] = f'''{language}:{entity_name}'''
lowerCAmelCase : Optional[int] = entity_id
return new_mapping
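# Illustrative only: the JSON-lines layout the loader above expects (entries assumed, not
# taken from a real mLUKE entity vocabulary). Each line carries an "id" and a list of
# (entity_name, language) pairs; special tokens keep their bare name, everything else is
# stored under "language:entity_name".
#
#   {"id": 0, "entities": [["[PAD]", null]]}
#   {"id": 3, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
#
# would yield {"[PAD]": 0, "en:Tokyo": 3, "ja:東京": 3}.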
if __name__ == "__main__":
snake_case__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
    help='''Path to the entity vocabulary file (one JSON object per line), containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
snake_case__ : Optional[int] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
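# Illustrative invocation only -- the script name and every path below are placeholders,
# not taken from the original documentation:
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab_file \
#       --pytorch_dump_folder_path /path/to/converted_mluke \
#       --model_size base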
| 60
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ):
'''simple docstring'''
super().__init__()
_A = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase )
else:
_A = None
_A = torch.nn.Parameter(__UpperCAmelCase )
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , )
def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1
# get prompt text embeddings
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A = text_input_ids[:, : self.tokenizer.model_max_length]
_A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_A = self.learned_classifier_free_sampling_embeddings.embeddings
_A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
else:
_A = [""] * batch_size
_A = text_input_ids.shape[-1]
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
_A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = negative_prompt_embeds.shape[1]
_A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
_A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = len(__UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )
_A = batch_size * num_images_per_prompt
_A = guidance_scale > 1.0
_A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCAmelCase )}.''' )
# get the initial completely masked latents unless the user supplied it
_A = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_A = self.transformer.num_vector_embeds - 1
_A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
_A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )
_A = self.scheduler.timesteps.to(self.device )
_A = latents
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
_A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample
if do_classifier_free_guidance:
_A , _A = model_output.chunk(2 )
_A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )
_A = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
# remove `log(0)`'s (`-inf`s)
_A = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_A = self.vqvae.config.vq_embed_dim
_A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
_A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ):
'''simple docstring'''
_A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
_A = torch.exp(__UpperCAmelCase )
_A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
_A = torch.cat((all_true, keep_mask) , dim=1 )
_A = keep_mask[:, :-1, :]
_A = keep_mask.gather(1 , indices.argsort(1 ) )
_A = log_p_x_0.clone()
_A = -torch.inf # -inf = log(0)
return rv
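# Minimal sketch of the truncation step above on a toy distribution (all values assumed):
# keep the most probable classes while their cumulative probability stays below the
# truncation rate, always keep at least the single most probable class, and treat the
# rest as log(0).
_toy_log_p = torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]).log()  # (batch=1, classes=4, pixels=1)
_sorted_log_p, _indices = torch.sort(_toy_log_p, 1, descending=True)
_keep = _sorted_log_p.exp().cumsum(dim=1) < 0.75  # truncation rate of 0.75 assumed
_keep = torch.cat((torch.full_like(_keep[:, :1, :], True), _keep), dim=1)[:, :-1, :]
_keep = _keep.gather(1, _indices.argsort(1))
# classes 0 (p=0.5) and 1 (p=0.3) survive; classes 2 and 3 would be set to -inf.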
| 79
| 0
|
def lowerCamelCase_ ( ):
lowerCamelCase_ = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
lowerCamelCase_ = 6
lowerCamelCase_ = 1
lowerCamelCase_ = 1_9_0_1
lowerCamelCase_ = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowerCamelCase_ = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
lowerCamelCase_ = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
lowerCamelCase_ = day - days_per_month[month - 2]
if month > 1_2:
year += 1
lowerCamelCase_ = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
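# A quick cross-check of the month-by-month bookkeeping above, using the standard
# library's calendar arithmetic instead of the hand-rolled leap-year handling
# (sketch only; the helper name below is hypothetical and the loop above deliberately
# avoids datetime).
import datetime


def count_first_of_month_sundays() -> int:
    return sum(
        1
        for year in range(1_9_0_1, 2_0_0_1)
        for month in range(1, 1_3)
        if datetime.date(year, month, 1).weekday() == 6  # Monday == 0 ... Sunday == 6
    )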
if __name__ == "__main__":
print(solution())
| 357
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__A ={
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = {}
state_dict.pop("pixel_mean" , lowerCamelCase__ )
state_dict.pop("pixel_std" , lowerCamelCase__ )
lowerCamelCase_ = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase_ = key.replace(lowerCamelCase__ , lowerCamelCase__ )
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = int(re.match(lowerCamelCase__ , lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
lowerCamelCase_ = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
lowerCamelCase_ = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
lowerCamelCase_ = key.replace("layers.2" , "proj_out" )
lowerCamelCase_ = value
lowerCamelCase_ = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
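# Illustrative only (keys assumed, not read from a real SAM checkpoint): the mapping above
# sends, for example,
#   "image_encoder.patch_embed.proj.weight" -> "vision_encoder.patch_embed.projection.weight"
#   "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight"
#       -> the same key with "layers.0" rewritten to "proj_in" (the layer_nb == 0 branch),
# while "pixel_mean" / "pixel_std" are dropped before the rename pass.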
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="ybelkada/segment-anything" ):
lowerCamelCase_ = hf_hub_download(lowerCamelCase__ , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
lowerCamelCase_ = SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase_ = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
lowerCamelCase_ = SamConfig(
vision_config=lowerCamelCase__ , )
elif "sam_vit_h" in model_name:
lowerCamelCase_ = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
lowerCamelCase_ = SamConfig(
vision_config=lowerCamelCase__ , )
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = replace_keys(lowerCamelCase__ )
lowerCamelCase_ = SamImageProcessor()
lowerCamelCase_ = SamProcessor(image_processor=lowerCamelCase__ )
lowerCamelCase_ = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
lowerCamelCase_ = hf_model.to("cuda" )
lowerCamelCase_ = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" )
lowerCamelCase_ = [[[4_0_0, 6_5_0]]]
lowerCamelCase_ = [[1]]
lowerCamelCase_ = processor(images=np.array(lowerCamelCase__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase_ = hf_model(**lowerCamelCase__ )
lowerCamelCase_ = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
lowerCamelCase_ = processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase_ = hf_model(**lowerCamelCase__ )
lowerCamelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
lowerCamelCase_ = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
lowerCamelCase_ = processor(images=np.array(lowerCamelCase__ ) , input_boxes=lowerCamelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase_ = hf_model(**lowerCamelCase__ )
lowerCamelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
lowerCamelCase_ = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
lowerCamelCase_ = [[1, 1]]
lowerCamelCase_ = processor(
images=np.array(lowerCamelCase__ ) , input_points=lowerCamelCase__ , input_labels=lowerCamelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase_ = hf_model(**lowerCamelCase__ )
lowerCamelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__A =argparse.ArgumentParser()
__A =['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
    help='''Name of the original SAM checkpoint to convert.''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
    help='''Hub repository id that hosts the original SAM checkpoints.''',
)
__A =parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
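# Illustrative invocation only (script name and output path are placeholders; note that
# the checks above run the model on a CUDA device):
#
#   python convert_sam_checkpoint.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path /path/to/converted_sam \
#       --push_to_hub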
| 47
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = XGLMConfig
SCREAMING_SNAKE_CASE_ : int = {}
SCREAMING_SNAKE_CASE_ : List[str] = """gelu"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=0.02 , ) -> List[str]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = ffn_dim
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 1
def __A ( self ) -> Tuple:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __A ( self ) -> Any:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = TFXGLMModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __A ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@slow
def __A ( self ) -> str:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFXGLMModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __A ( self ) -> Dict:
super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self , lowerCAmelCase__=True ) -> int:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
SCREAMING_SNAKE_CASE = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
@slow
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE = tokenizer('Today is a nice day and' , return_tensors='tf' )
SCREAMING_SNAKE_CASE = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = 'left'
# use different length sentences to test batching
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='tf' , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['input_ids']
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
| 113
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__UpperCamelCase = HfApi()
__UpperCamelCase = {}
# fmt: off
__UpperCamelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__UpperCamelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__UpperCamelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__UpperCamelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__UpperCamelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__UpperCamelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__UpperCamelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__UpperCamelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__UpperCamelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
    1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251
])
__UpperCamelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__UpperCamelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__UpperCamelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__UpperCamelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__UpperCamelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__UpperCamelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
__UpperCamelCase = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__UpperCamelCase = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
__UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__UpperCamelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__UpperCamelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__UpperCamelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
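# Minimal sketch of how a Hub model id is turned into a key of the reference-tensor lookup
# above (the id below is assumed for illustration only):
_example_id = "google/ddpm-cifar10-32"
_example_key = "_".join("_".join(_example_id.split("/")).split("-"))
assert _example_key == "google_ddpm_cifar10_32"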
| 113
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: Tuple ={'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Tuple =['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_: int =[
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_: Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
UpperCAmelCase_ = Dataset.from_dict(snake_case_ )
return dataset
class __A ( UpperCamelCase__ ):
def _lowercase (self : str ):
UpperCAmelCase_ = get_dataset()
UpperCAmelCase_ = make_duplicate_clusters(__a , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ = deduplicate_dataset(__a )
self.assertEqual(len(__a ) , 2 )
print(__a )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , __a )
| 106
| 0
|
from __future__ import annotations
import pandas as pd
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = [0] * no_of_processes
__magic_name__ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(A_ ):
__magic_name__ = burst_time[i]
__magic_name__ = 0
__magic_name__ = 0
__magic_name__ = 999999999
__magic_name__ = 0
__magic_name__ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(A_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__magic_name__ = remaining_time[j]
__magic_name__ = j
__magic_name__ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__magic_name__ = remaining_time[short]
if minm == 0:
__magic_name__ = 999999999
if remaining_time[short] == 0:
complete += 1
__magic_name__ = False
# Find finish time of current process
__magic_name__ = increment_time + 1
# Calculate waiting time
__magic_name__ = finish_time - arrival_time[short]
__magic_name__ = finar - burst_time[short]
if waiting_time[short] < 0:
__magic_name__ = 0
# Increment time
increment_time += 1
return waiting_time
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = [0] * no_of_processes
for i in range(A_ ):
__magic_name__ = burst_time[i] + waiting_time[i]
return turn_around_time
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = 0
__magic_name__ = 0
for i in range(A_ ):
__magic_name__ = total_waiting_time + waiting_time[i]
__magic_name__ = total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("""Average turn around time =""", total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
__lowerCAmelCase : Union[str, Any] = int(input())
__lowerCAmelCase : Tuple = [0] * no_of_processes
__lowerCAmelCase : Tuple = [0] * no_of_processes
__lowerCAmelCase : List[str] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
__lowerCAmelCase , __lowerCAmelCase : List[Any] = map(int, input().split())
__lowerCAmelCase : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : int = burst_time
__lowerCAmelCase : Any = no_of_processes
__lowerCAmelCase : Dict = waiting_time
__lowerCAmelCase : str = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__lowerCAmelCase : str = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 88
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE :Dict = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[str]:
"""simple docstring"""
return max(metric_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for gt in ground_truths )
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Dict:
"""simple docstring"""
UpperCamelCase_ = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()]
UpperCamelCase_ = []
if args.gold_data_mode == "qa":
UpperCamelCase_ = pd.read_csv(SCREAMING_SNAKE_CASE_ , sep="\t" , header=SCREAMING_SNAKE_CASE_ )
for answer_list in data[1]:
UpperCamelCase_ = ast.literal_eval(SCREAMING_SNAKE_CASE_ )
answers.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase_ = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()]
UpperCamelCase_ = [[reference] for reference in references]
UpperCamelCase_ = UpperCamelCase_ = UpperCamelCase_ = 0
for prediction, ground_truths in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
total += 1
em += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
fa += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = 100.0 * em / total
UpperCamelCase_ = 100.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = args.k
UpperCamelCase_ = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()]
UpperCamelCase_ = [line.strip() for line in open(SCREAMING_SNAKE_CASE_ , "r" ).readlines()]
UpperCamelCase_ = UpperCamelCase_ = 0
for hypo, reference in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase_ = set(hypo.split("\t" )[:k] )
UpperCamelCase_ = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase_ = 100.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
"""simple docstring"""
def strip_title(SCREAMING_SNAKE_CASE_ ):
if title.startswith("\"" ):
UpperCamelCase_ = title[1:]
if title.endswith("\"" ):
UpperCamelCase_ = title[:-1]
return title
UpperCamelCase_ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE_ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , )["input_ids"].to(args.device )
UpperCamelCase_ = rag_model.rag.question_encoder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = question_enc_outputs[0]
UpperCamelCase_ = rag_model.retriever(
SCREAMING_SNAKE_CASE_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
UpperCamelCase_ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase_ = []
for docs in all_docs:
UpperCamelCase_ = [strip_title(SCREAMING_SNAKE_CASE_ ) for title in docs["title"]]
provenance_strings.append("\t".join(SCREAMING_SNAKE_CASE_ ) )
return provenance_strings
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase_ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE_ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = inputs_dict.input_ids.to(args.device )
UpperCamelCase_ = inputs_dict.attention_mask.to(args.device )
UpperCamelCase_ = rag_model.generate( # rag_model overwrites generate
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=SCREAMING_SNAKE_CASE_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase_ = rag_model.retriever.generator_tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
if args.print_predictions:
for q, a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.info("Q: {} - A: {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
return answers
def lowerCAmelCase( )-> List[str]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=SCREAMING_SNAKE_CASE_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=SCREAMING_SNAKE_CASE_ , choices=["exact", "compressed", "legacy"] , type=SCREAMING_SNAKE_CASE_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=SCREAMING_SNAKE_CASE_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=SCREAMING_SNAKE_CASE_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=SCREAMING_SNAKE_CASE_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=SCREAMING_SNAKE_CASE_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=SCREAMING_SNAKE_CASE_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=SCREAMING_SNAKE_CASE_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=SCREAMING_SNAKE_CASE_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=SCREAMING_SNAKE_CASE_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=5_0 , type=SCREAMING_SNAKE_CASE_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> str:
"""simple docstring"""
UpperCamelCase_ = {}
if args.model_type is None:
UpperCamelCase_ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
UpperCamelCase_ = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
UpperCamelCase_ = args.n_docs
if args.index_name is not None:
UpperCamelCase_ = args.index_name
if args.index_path is not None:
UpperCamelCase_ = args.index_path
else:
UpperCamelCase_ = BartForConditionalGeneration
UpperCamelCase_ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = get_scores if args.eval_mode == "e2e" else get_precision_at_k
UpperCamelCase_ = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(SCREAMING_SNAKE_CASE_ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(SCREAMING_SNAKE_CASE_ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
UpperCamelCase_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , retriever=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
model.retriever.init_retrieval()
else:
UpperCamelCase_ = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
UpperCamelCase_ = []
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
questions.append(line.strip() )
if len(SCREAMING_SNAKE_CASE_ ) == args.eval_batch_size:
UpperCamelCase_ = evaluate_batch_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
preds_file.write("\n".join(SCREAMING_SNAKE_CASE_ ) + "\n" )
preds_file.flush()
UpperCamelCase_ = []
if len(SCREAMING_SNAKE_CASE_ ) > 0:
UpperCamelCase_ = evaluate_batch_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
preds_file.write("\n".join(SCREAMING_SNAKE_CASE_ ) )
preds_file.flush()
score_fn(SCREAMING_SNAKE_CASE_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :List[Any] = get_args()
main(args)
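# Illustrative invocation only (script name and paths are placeholders, and the model id
# shown is just an example):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set /path/to/test.source \
#       --gold_data_path /path/to/gold_data \
#       --gold_data_mode qa \
#       --predictions_path /path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --n_docs 5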
| 60
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[Any] = """bridgetower_vision_model"""
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=3 , _lowercase=16 , _lowercase=288 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=False , _lowercase=True , _lowercase=False , **_lowercase , )-> Optional[Any]:
super().__init__(**_lowercase )
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_channels
UpperCamelCase_ = patch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = initializer_factor
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = stop_gradient
UpperCamelCase_ = share_layernorm
UpperCamelCase_ = remove_last_layer
@classmethod
def UpperCAmelCase_ ( cls , _lowercase , **_lowercase )-> "PretrainedConfig":
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_lowercase , **_lowercase )
if config_dict.get("model_type" ) == "bridgetower":
            UpperCamelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowercase , **_lowercase )
class __magic_name__ ( snake_case ):
UpperCamelCase_ :Optional[int] = """bridgetower_text_model"""
def __init__( self , _lowercase=50_265 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=1 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=514 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , **_lowercase , )-> Optional[int]:
super().__init__(**_lowercase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = initializer_factor
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
@classmethod
def UpperCAmelCase_ ( cls , _lowercase , **_lowercase )-> "PretrainedConfig":
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_lowercase , **_lowercase )
if config_dict.get("model_type" ) == "bridgetower":
UpperCamelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowercase , **_lowercase )
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[Any] = """bridgetower"""
def __init__( self , _lowercase=True , _lowercase="gelu" , _lowercase=768 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=False , _lowercase="add" , _lowercase=12 , _lowercase=6 , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , )-> List[Any]:
# TODO: remove this once the Hub files are updated.
UpperCamelCase_ = kwargs.pop("text_config_dict" , _lowercase )
UpperCamelCase_ = kwargs.pop("vision_config_dict" , _lowercase )
super().__init__(**_lowercase )
UpperCamelCase_ = share_cross_modal_transformer_layers
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_size
UpperCamelCase_ = initializer_factor
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = share_link_tower_layers
UpperCamelCase_ = link_tower_type
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = tie_word_embeddings
UpperCamelCase_ = init_layernorm_from_vision_encoder
if text_config is None:
UpperCamelCase_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
UpperCamelCase_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
UpperCamelCase_ = BridgeTowerTextConfig(**_lowercase )
UpperCamelCase_ = BridgeTowerVisionConfig(**_lowercase )
@classmethod
def UpperCAmelCase_ ( cls , _lowercase , _lowercase , **_lowercase )-> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.text_config.to_dict()
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
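# A minimal sketch of the composite-config pattern shown above, with hypothetical
# generic names: a top-level config owns nested sub-configs and round-trips them
# through to_dict(), mirroring how the text and vision configs are nested here.
class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return {"hidden_size": self.hidden_size}


class CompositeConfig:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        return {
            "text_config": self.text_config.to_dict(),
            "vision_config": self.vision_config.to_dict(),
        }


cfg = CompositeConfig(text_config={"hidden_size": 512})
assert cfg.to_dict()["text_config"]["hidden_size"] == 512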
| 60
| 1
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a__ : Any ='''pt'''
elif is_tf_available():
a__ : Tuple ='''tf'''
else:
a__ : List[str] ='''jax'''
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =PerceiverTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] =False
def _lowerCamelCase ( self : int ):
super().setUp()
__UpperCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self : Optional[Any] ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def _lowerCamelCase ( self : str , **__A : Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _lowerCamelCase ( self : List[str] , __A : Union[str, Any] , __A : Dict=False , __A : Any=2_0 , __A : Tuple=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__UpperCamelCase = []
for i in range(len(__A ) ):
try:
__UpperCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__UpperCamelCase = list(filter(lambda __A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __A ) )
__UpperCamelCase = list(filter(lambda __A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__A ) , __A ) )
if max_length is not None and len(__A ) > max_length:
__UpperCamelCase = toks[:max_length]
if min_length is not None and len(__A ) < min_length and len(__A ) > 0:
while len(__A ) < min_length:
__UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
__UpperCamelCase = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
if " " not in output_txt and len(__A ) > 1:
__UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__A )
)
if with_prefix_space:
__UpperCamelCase = ' ' + output_txt
__UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A )
return output_txt, output_ids
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = 'Unicode €.'
__UpperCamelCase = tokenizer(__A )
__UpperCamelCase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'] , __A )
# decoding
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , '[CLS]Unicode €.[SEP]' )
__UpperCamelCase = tokenizer('e è é ê ë' )
__UpperCamelCase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'] , __A )
# decoding
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__UpperCamelCase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
__UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A )
self.assertIsInstance(__A , __A )
if FRAMEWORK != "jax":
__UpperCamelCase = list(batch.input_ids.numpy()[0] )
else:
__UpperCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__A , __A )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __A )
self.assertIn('attention_mask' , __A )
self.assertNotIn('decoder_input_ids' , __A )
self.assertNotIn('decoder_attention_mask' , __A )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = self.perceiver_tokenizer
__UpperCamelCase = [
'Summary of the text.',
'Another summary.',
]
__UpperCamelCase = tokenizer(
text_target=__A , max_length=3_2 , padding='max_length' , truncation=__A , return_tensors=__A )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def _lowerCamelCase ( self : Dict ):
# safety check on max_len default value so we are sure the test works
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
__UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A )
__UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
shutil.rmtree(__A )
__UpperCamelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__UpperCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A )
tokenizer.save_pretrained(__A )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A )
__UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__UpperCamelCase = tokenizer.__class__.from_pretrained(__A , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__A )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__A )
with open(os.path.join(__A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase = json.load(__A )
with open(os.path.join(__A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__UpperCamelCase = json.load(__A )
__UpperCamelCase = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
__UpperCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
__UpperCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__A , __A )
with open(os.path.join(__A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__A , __A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCamelCase = tokenizer_class.from_pretrained(
__A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCamelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__A )]
__UpperCamelCase = tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '�' )
def _lowerCamelCase ( self : Dict ):
pass
def _lowerCamelCase ( self : List[str] ):
pass
def _lowerCamelCase ( self : Tuple ):
pass
def _lowerCamelCase ( self : Optional[Any] ):
pass
def _lowerCamelCase ( self : str ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens.
__UpperCamelCase = self.get_tokenizers(fast=__A , do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__UpperCamelCase = tokenizer.convert_tokens_to_string(__A )
self.assertIsInstance(__A , __A )
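# A hedged illustration of the byte-level scheme the expected ids above suggest:
# Perceiver-style tokenizers map each UTF-8 byte to (byte value + offset), where the
# offset reserves the first ids for special tokens ([CLS]=4, [SEP]=5 in the tests).
# The offset of 6 is inferred from the test vectors above, not quoted from library docs.
def byte_encode(text, offset=6, cls_id=4, sep_id=5):
    return [cls_id] + [b + offset for b in text.encode("utf-8")] + [sep_id]


assert byte_encode("U")[:2] == [4, ord("U") + 6]  # 'U' (85) -> 91, matching the expected ids above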
| 53
|
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 .. i-1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
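# A quick sanity check of the recurrence above: the first Catalan numbers are
# 1, 1, 2, 5, 14, 42, ... (for example C(3) = C0*C2 + C1*C1 + C2*C0 = 2 + 1 + 2 = 5).
def catalan_check(n=5):
    c = [1] * (n + 1)
    for i in range(2, n + 1):
        c[i] = sum(c[j] * c[i - j - 1] for j in range(i))
    return c


assert catalan_check(5) == [1, 1, 2, 5, 14, 42]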
| 97
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_a , **_a ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
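# A minimal sketch of the deprecation-shim pattern above, with hypothetical generic
# names: the old class simply subclasses its replacement and emits a FutureWarning
# when constructed.
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)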
| 353
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
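# A hedged sketch of the mask step above: segmentation logits are squashed with a
# sigmoid and (optionally) thresholded into a binary inpainting mask. NumPy stands in
# for the torch tensors used by the pipeline, and the 0.5 threshold is an assumption,
# not something the pipeline above applies.
import numpy as np


def logits_to_mask(logits, threshold=0.5):
    probs = 1.0 / (1.0 + np.exp(-logits))  # sigmoid
    return (probs > threshold).astype(np.uint8) * 255  # 0/255 mask image


mask = logits_to_mask(np.array([[-4.0, 4.0]]))
assert mask.tolist() == [[0, 255]]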
| 11
| 0
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class A ( yaml.SafeLoader ):
def A__ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase__ = [tuple(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else key for key in keys]
lowercase__ = Counter(lowerCamelCase__ )
lowercase__ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=False ) -> Dict:
'''simple docstring'''
lowercase__ = super().construct_mapping(lowerCamelCase__ , deep=lowerCamelCase__ )
self._check_no_duplicates_on_constructed_node(lowerCamelCase__ )
return mapping
def _A ( lowercase__ ):
lowercase__ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase__ = full_content[1:].index("""---""" ) + 1
lowercase__ = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase__ )
class A ( __UpperCAmelCase ):
# class attributes
lowerCamelCase : Any = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def A__ ( cls , lowerCamelCase__ ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCamelCase__ , encoding="""utf-8""" ) as readme_file:
lowercase__ , lowercase__ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCamelCase__ )
else:
return cls()
def A__ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCamelCase__ , encoding="""utf-8""" ) as readme_file:
lowercase__ = readme_file.read()
else:
lowercase__ = None
lowercase__ = self._to_readme(lowerCamelCase__ )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ = None ) -> str:
'''simple docstring'''
if readme_content is not None:
lowercase__ , lowercase__ = _split_yaml_from_readme(lowerCamelCase__ )
lowercase__ = """---\n""" + self.to_yaml_string() + """---\n""" + content
else:
lowercase__ = """---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def A__ ( cls , lowerCamelCase__ ) -> "DatasetMetadata":
'''simple docstring'''
lowercase__ = yaml.load(lowerCamelCase__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase__ = {
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCamelCase__ )
def A__ ( self ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCamelCase__ , allow_unicode=lowerCamelCase__ , encoding="""utf-8""" , ).decode("""utf-8""" )
__A = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__A = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__A = ap.parse_args()
__A = Path(args.readme_filepath)
__A = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
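# A small illustration of the front-matter split performed above: the YAML block
# between the leading "---" markers is separated from the README body, using the same
# index arithmetic as the helper function.
readme = "---\nlanguage: en\n---\n# My dataset\nSome description."
lines = readme.splitlines()
sep_idx = lines[1:].index("---") + 1
yaml_block = "\n".join(lines[1:sep_idx])
body = "\n".join(lines[sep_idx + 1 :])
assert yaml_block == "language: en"
assert body == "# My dataset\nSome description."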
| 164
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class A ( __UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = """convnextv2"""
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="gelu" , lowerCamelCase__=0.02 , lowerCamelCase__=1e-12 , lowerCamelCase__=0.0 , lowerCamelCase__=224 , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = num_stages
lowercase__ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
lowercase__ = [3, 3, 9, 3] if depths is None else depths
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = drop_path_rate
lowercase__ = image_size
lowercase__ = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
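# A quick illustration of the stage naming used above: with the default four stages
# the backbone exposes ["stem", "stage1", "stage2", "stage3", "stage4"], which the
# out_features/out_indices alignment then selects from.
depths = [3, 3, 9, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]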
| 164
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """CLIPImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
UpperCamelCase = kwargs.pop('feature_extractor' )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
UpperCamelCase = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def A ( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : Any ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : Tuple ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
return self.image_processor_class
@property
def A ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , )
return self.image_processor
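# A minimal sketch of the processor-merge pattern above, with hypothetical stand-in
# callables: text goes through a tokenizer, images through an image processor, and the
# image features are attached to the text encoding under "pixel_values".
def process(text=None, images=None, tokenize=None, featurize=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    encoding = tokenize(text) if text is not None else {}
    if images is not None:
        pixel_values = featurize(images)
        if text is not None:
            encoding["pixel_values"] = pixel_values
            return encoding
        return {"pixel_values": pixel_values}
    return encoding


out = process(
    text="a cat",
    images=[0],
    tokenize=lambda t: {"input_ids": [1, 2]},
    featurize=lambda i: [[0.0]],
)
assert set(out) == {"input_ids", "pixel_values"}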
| 359
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase , UpperCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
UpperCamelCase = np.array(A__ ).astype(np.floataa ) / 255.0
UpperCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
UpperCamelCase = torch.from_numpy(A__ )
return 2.0 * image - 1.0
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : VQModel , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : Optional[int] = 1_0_0 , UpperCamelCase__ : Optional[float] = 0.0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
UpperCamelCase = 1
elif isinstance(UpperCamelCase__ , torch.Tensor ):
UpperCamelCase = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase__ )}""" )
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
UpperCamelCase = preprocess(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCamelCase = next(self.unet.parameters() ).dtype
UpperCamelCase = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
UpperCamelCase = image.to(device=self.device , dtype=UpperCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )
UpperCamelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for t in self.progress_bar(UpperCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
UpperCamelCase = torch.cat([latents, image] , dim=1 )
UpperCamelCase = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
UpperCamelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
UpperCamelCase = self.vqvae.decode(UpperCamelCase__ ).sample
UpperCamelCase = torch.clamp(UpperCamelCase__ , -1.0 , 1.0 )
UpperCamelCase = image / 2 + 0.5
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
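# A hedged sketch of the preprocessing above: the input image is resized down to the
# nearest multiple of 32 and mapped from [0, 255] to [-1, 1], the range the diffusion
# model expects. NumPy stands in for the PIL/torch objects used in the pipeline.
import numpy as np


def to_model_range(pixels_uint8):
    x = pixels_uint8.astype(np.float32) / 255.0
    return 2.0 * x - 1.0


assert to_model_range(np.array([0, 255], dtype=np.uint8)).tolist() == [-1.0, 1.0]
assert (67 - 67 % 32, 130 - 130 % 32) == (64, 128)  # nearest multiples of 32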
| 249
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
UpperCAmelCase_ :str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :Optional[str] = field(default=A__ , metadata={"help": "The input training data file (a text file)."} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
if self.train_file is not None:
lowerCAmelCase_ :List[Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCAmelCase_ :Tuple = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :PreTrainedTokenizerBase
UpperCAmelCase_ :Union[bool, str, PaddingStrategy] = True
UpperCAmelCase_ :Optional[int] = None
UpperCAmelCase_ :Optional[int] = None
def __call__( self , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = """label""" if """label""" in features[0].keys() else """labels"""
lowerCAmelCase_ :Dict = [feature.pop(__UpperCAmelCase ) for feature in features]
lowerCAmelCase_ :Optional[int] = len(__UpperCAmelCase )
lowerCAmelCase_ :int = len(features[0]["""input_ids"""] )
lowerCAmelCase_ :Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(__UpperCAmelCase )] for feature in features
]
lowerCAmelCase_ :Union[str, Any] = list(chain(*__UpperCAmelCase ) )
lowerCAmelCase_ :List[str] = self.tokenizer.pad(
__UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
lowerCAmelCase_ :Dict = {k: v.view(__UpperCAmelCase , __UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
lowerCAmelCase_ :Dict = torch.tensor(__UpperCAmelCase , dtype=torch.intaa )
return batch
def _snake_case ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase_ :Dict = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
datasets.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase_ :Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ :Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase_ :str = {}
if data_args.train_file is not None:
lowerCAmelCase_ :Any = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase_ :Optional[int] = data_args.validation_file
lowerCAmelCase_ :Union[str, Any] = data_args.train_file.split(""".""" )[-1]
lowerCAmelCase_ :List[Any] = load_dataset(
snake_case__ , data_files=snake_case__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase_ :Union[str, Any] = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase_ :Optional[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase_ :Union[str, Any] = [f"""ending{i}""" for i in range(4 )]
lowerCAmelCase_ :Union[str, Any] = """sent1"""
lowerCAmelCase_ :Tuple = """sent2"""
if data_args.max_seq_length is None:
lowerCAmelCase_ :int = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
lowerCAmelCase_ :Optional[Any] = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase_ :Tuple = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase__ : List[Any] ):
lowerCAmelCase_ :Tuple = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase_ :Any = examples[question_header_name]
lowerCAmelCase_ :List[str] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(snake_case__ )
]
# Flatten out
lowerCAmelCase_ :Optional[Any] = list(chain(*snake_case__ ) )
lowerCAmelCase_ :Optional[Any] = list(chain(*snake_case__ ) )
# Tokenize
lowerCAmelCase_ :str = tokenizer(
snake_case__ , snake_case__ , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(snake_case__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowerCAmelCase_ :Optional[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowerCAmelCase_ :List[str] = min(len(snake_case__ ) , data_args.max_train_samples )
lowerCAmelCase_ :Tuple = train_dataset.select(range(snake_case__ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowerCAmelCase_ :List[str] = train_dataset.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowerCAmelCase_ :int = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowerCAmelCase_ :Optional[Any] = min(len(snake_case__ ) , data_args.max_eval_samples )
lowerCAmelCase_ :List[Any] = eval_dataset.select(range(snake_case__ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowerCAmelCase_ :Optional[Any] = eval_dataset.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCAmelCase_ :str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=snake_case__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase__ : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ :str = eval_predictions
lowerCAmelCase_ :int = np.argmax(snake_case__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCAmelCase_ :Optional[int] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
lowerCAmelCase_ :int = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase_ :Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase_ :int = last_checkpoint
lowerCAmelCase_ :Tuple = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase_ :Tuple = train_result.metrics
lowerCAmelCase_ :Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
lowerCAmelCase_ :List[Any] = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics("""train""" , snake_case__ )
trainer.save_metrics("""train""" , snake_case__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ :Dict = trainer.evaluate()
lowerCAmelCase_ :Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case__ )
lowerCAmelCase_ :str = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics("""eval""" , snake_case__ )
trainer.save_metrics("""eval""" , snake_case__ )
lowerCAmelCase_ :Any = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def _snake_case ( lowercase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
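# A small illustration of the flatten/un-flatten trick used by the multiple-choice
# collator above: 2 examples x 4 choices are flattened to 8 rows so the tokenizer can
# pad them together, then reshaped back to (batch, choices, seq_len).
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.arange(batch_size * num_choices * seq_len)
unflattened = flat.view(batch_size, num_choices, -1)
assert unflattened.shape == (batch_size, num_choices, seq_len)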
| 84
|
"""simple docstring"""
from __future__ import annotations
import math
def A ( snake_case__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = str(snake_case__ )
SCREAMING_SNAKE_CASE__ = [n]
for i in range(1 , len(snake_case__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def A ( snake_case__ ):
'''simple docstring'''
if len(str(snake_case__ ) ) > 3:
if not is_prime(int(str(snake_case__ )[-3:] ) ) or not is_prime(int(str(snake_case__ )[:3] ) ):
return False
return True
def A ( snake_case__ = 11 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 13
while len(snake_case__ ) != count:
if validate(snake_case__ ):
SCREAMING_SNAKE_CASE__ = list_truncated_nums(snake_case__ )
if all(is_prime(snake_case__ ) for i in list_nums ):
list_truncated_primes.append(snake_case__ )
num += 2
return list_truncated_primes
def A ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(11)) = }')
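# A quick check of the truncation logic above: 3797 is truncatable because every
# left truncation (797, 97, 7) and every right truncation (379, 37, 3) is prime.
n = "3797"
left_truncations = [int(n[i:]) for i in range(1, len(n))]
right_truncations = [int(n[:-i]) for i in range(1, len(n))]
assert left_truncations == [797, 97, 7]
assert right_truncations == [379, 37, 3]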
| 165
| 0
|
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ = 0 ) -> None:
'''simple docstring'''
snake_case_ , snake_case_ : Any = row, column
snake_case_ : Union[str, Any] = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__(self ) -> str:
'''simple docstring'''
snake_case_ : int = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
snake_case_ : List[Any] = 0
for row_vector in self.array:
for obj in row_vector:
snake_case_ : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
snake_case_ : Dict = F'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ ) -> str:
nonlocal string_format_identifier
snake_case_ : List[Any] = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__(self ) -> str:
'''simple docstring'''
return str(self )
def lowerCamelCase (self , __magic_name__ ) -> bool:
'''simple docstring'''
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__(self , __magic_name__ ) -> Any:
'''simple docstring'''
assert self.validate_indicies(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__(self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
assert self.validate_indicies(__magic_name__ )
snake_case_ : Dict = value
def __add__(self , __magic_name__ ) -> Matrix:
'''simple docstring'''
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
snake_case_ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Any = self[r, c] + another[r, c]
return result
def __neg__(self ) -> Matrix:
'''simple docstring'''
snake_case_ : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Tuple = -self[r, c]
return result
def __sub__(self , __magic_name__ ) -> Matrix:
'''simple docstring'''
return self + (-another)
def __mul__(self , __magic_name__ ) -> Matrix:
'''simple docstring'''
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
snake_case_ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Optional[Any] = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
snake_case_ : str = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
snake_case_ : List[str] = F'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowerCamelCase (self ) -> Matrix:
'''simple docstring'''
snake_case_ : List[Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
snake_case_ : Optional[int] = self[r, c]
return result
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
snake_case_ : Optional[int] = v.transpose()
snake_case_ : Optional[Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        """Exercise the Matrix class and the Sherman-Morrison update."""
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''')
        print(f'''v is {v}''')
        print(f'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')
    def test2() -> None:
        """Run the module doctests."""
        import doctest
        doctest.testmod()
    test1()
| 353
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
if os.path.exists(_UpperCamelCase ):
if os.path.exists(os.path.join(_UpperCamelCase , '''config.json''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''config.json''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''config.json''' ) )
if os.path.exists(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) )
else:
os.makedirs(_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
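# Illustrative sanity check for the entropy helper above (values worked out by hand,
# not part of the original script): a uniform attention distribution over 4 positions
# has the maximal entropy log(4) ~= 1.386, while a one-hot distribution has entropy 0.
#   entropy(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))  # tensor([1.3863])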
def print_ad_tensor(tensor):
    """Log a 2D tensor, one line per layer."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor) ) ) )
    for row in range(len(tensor) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ : int = model.config.num_hidden_layers, model.config.num_attention_heads
snake_case_ : int = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
snake_case_ : Optional[int] = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
if head_mask is None:
snake_case_ : Tuple = torch.ones(_UpperCamelCase , _UpperCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_UpperCamelCase )
    # If attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
snake_case_ : Dict = None
snake_case_ : Tuple = 0.0
snake_case_ : Dict = 0.0
for step, inputs in enumerate(tqdm(_UpperCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
snake_case_ : Any = tuple(t.to(args.device ) for t in inputs )
((snake_case_) , ) : Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
snake_case_ : List[str] = model(_UpperCamelCase , labels=_UpperCamelCase , head_mask=_UpperCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
snake_case_ , snake_case_ , snake_case_ : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_UpperCamelCase ):
snake_case_ : Dict = entropy(attn.detach() , _UpperCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_UpperCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
snake_case_ : Union[str, Any] = 2
snake_case_ : Any = torch.pow(torch.pow(_UpperCamelCase , _UpperCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
snake_case_ : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_UpperCamelCase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_UpperCamelCase )
logger.info('''Head ranked by importance scores''' )
snake_case_ : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
snake_case_ : Union[str, Any] = torch.arange(
head_importance.numel() , device=args.device )
snake_case_ : Dict = head_ranks.view_as(_UpperCamelCase )
print_ad_tensor(_UpperCamelCase )
return attn_entropy, head_importance, total_loss
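# Note on the loop above: head importance is accumulated as the absolute gradient of
# the loss with respect to each head-mask entry, i.e. a first-order estimate of how
# much the loss would change if that head were masked out (the criterion proposed in
# "Are Sixteen Heads Really Better than One?", Michel et al., 2019).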
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ : Optional[int] = compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase )
    snake_case_ : Any = 1 / loss # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , _UpperCamelCase , original_score * args.masking_threshold )
snake_case_ : Any = torch.ones_like(_UpperCamelCase )
snake_case_ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
snake_case_ : List[Any] = original_score
while current_score >= original_score * args.masking_threshold:
snake_case_ : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
snake_case_ : Optional[Any] = float('''Inf''' )
snake_case_ : List[Any] = head_importance.view(-1 ).sort()[1]
if len(_UpperCamelCase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
snake_case_ : Optional[int] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
snake_case_ : Optional[Any] = new_head_mask.view(-1 )
snake_case_ : int = 0.0
snake_case_ : List[Any] = new_head_mask.view_as(_UpperCamelCase )
snake_case_ : List[str] = new_head_mask.clone().detach()
print_ad_tensor(_UpperCamelCase )
# Compute metric and head importance again
snake_case_ , snake_case_ , snake_case_ : str = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , head_mask=_UpperCamelCase )
snake_case_ : Tuple = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(_UpperCamelCase )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : str = datetime.now()
snake_case_ , snake_case_ , snake_case_ : List[Any] = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase )
snake_case_ : Union[str, Any] = 1 / loss
snake_case_ : Union[str, Any] = datetime.now() - before_time
snake_case_ : int = sum(p.numel() for p in model.parameters() )
snake_case_ : Tuple = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Any = [
v,
]
assert sum(len(_UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_UpperCamelCase )
snake_case_ : Union[str, Any] = sum(p.numel() for p in model.parameters() )
snake_case_ : Dict = datetime.now()
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase , actually_pruned=_UpperCamelCase , )
snake_case_ : Union[str, Any] = 1 / loss
snake_case_ : Optional[Any] = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _UpperCamelCase , _UpperCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _UpperCamelCase , _UpperCamelCase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(_UpperCamelCase , args.output_dir )
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=_UpperCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=_UpperCamelCase , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=_UpperCamelCase , help='''Amount of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=_UpperCamelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_UpperCamelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=_UpperCamelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 )
parser.add_argument('''--local_rank''' , type=_UpperCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
snake_case_ : Any = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
snake_case_ : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case_ : List[str] = torch.device('''cuda''' , args.local_rank )
snake_case_ : Union[str, Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
snake_case_ : int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case_ : Any = nn.parallel.DistributedDataParallel(
_UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCamelCase )
elif args.n_gpu > 1:
snake_case_ : Dict = nn.DataParallel(_UpperCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_UpperCamelCase )
torch.save(_UpperCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )
# Prepare dataset
snake_case_ : str = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
snake_case_ : Any = (torch.from_numpy(_UpperCamelCase ),)
snake_case_ : Any = TensorDataset(*_UpperCamelCase )
snake_case_ : List[str] = RandomSampler(_UpperCamelCase )
snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
snake_case_ : List[str] = mask_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
prune_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 279
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
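    # For reference, the tag being parsed above typically looks like the following
    # (illustrative markup only, not something this script emits):
    #   <meta property="og:image" content="https://example.com/picture.jpg" />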
| 123
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : Union[str, Any] = "▁"
_snake_case : Any = {"vocab_file": "prophetnet.tokenizer"}
_snake_case : Tuple = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Union[str, Any] = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
__snake_case : Dict = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
__snake_case : Optional[int] = token.rstrip("\n" )
__snake_case : Union[str, Any] = index
return vocab
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[Any]="[SEP]" , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : Optional[int]="[UNK]" , lowerCamelCase : Dict="[PAD]" , lowerCamelCase : str="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : int , ) -> None:
__snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
__snake_case : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__snake_case : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
__snake_case : List[str] = F'[unused{i}]'
__snake_case : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__snake_case : Any = 12
__snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCamelCase )
def __getstate__( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case : Union[str, Any] = self.__dict__.copy()
__snake_case : int = None
return state
def __setstate__( self : Tuple , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : Tuple = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Tuple = {}
__snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return ([0] * len(lowerCamelCase )) + [1]
return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1]
def __snake_case ( self : Any , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
__snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self : str ) -> str:
return len(self.sp_model ) + self.fairseq_offset
def __snake_case ( self : Any ) -> int:
__snake_case : Dict = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self : str , lowerCamelCase : str ) -> str:
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def __snake_case ( self : List[str] , lowerCamelCase : Dict ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : List[Any] = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
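        # Illustrative example (hypothetical id, not taken from the real vocab): if the
        # SentencePiece model mapped "▁hello" to id 100, this method would return 112
        # because of the fairseq_offset of 12 set in __init__.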
def __snake_case ( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case ( self : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
__snake_case : str = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip()
return out_string
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : List[str] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , "wb" ) as fi:
__snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__snake_case : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 123
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase (a_ :Optional[int] , a_ :List[str] , a_ :Dict , a_ :Optional[int] , a_ :int) -> Any:
for attribute in key.split('''.'''):
lowercase :List[str] = getattr(a_ , a_)
if weight_type is not None:
lowercase :str = getattr(a_ , a_).shape
else:
lowercase :Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase :Dict = value
elif weight_type == "weight_g":
lowercase :str = value
elif weight_type == "weight_v":
lowercase :str = value
elif weight_type == "bias":
lowercase :str = value
else:
lowercase :int = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def lowerCamelCase (a_ :Tuple , a_ :int) -> Tuple:
lowercase :List[Any] = []
lowercase :int = fairseq_model.state_dict()
lowercase :List[Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase :List[str] = None
for name, value in fairseq_dict.items():
lowercase :Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase :Optional[int] = True
elif name.split('''.''')[0] == "proj":
lowercase :Dict = fairseq_model.proj
lowercase :Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
lowercase :Optional[Any] = True
if "*" in mapped_key:
lowercase :Any = name.split(a_)[0].split('''.''')[-2]
lowercase :Dict = mapped_key.replace('''*''' , a_)
if "weight_g" in name:
lowercase :str = '''weight_g'''
elif "weight_v" in name:
lowercase :List[str] = '''weight_v'''
elif "bias" in name:
lowercase :Tuple = '''bias'''
elif "weight" in name:
lowercase :Any = '''weight'''
else:
lowercase :Dict = None
set_recursively(a_ , a_ , a_ , a_ , a_)
continue
if not is_used:
unused_weights.append(a_)
logger.warning(F"""Unused weights: {unused_weights}""")
return proj_weight
def lowerCamelCase (a_ :Optional[Any] , a_ :List[Any] , a_ :Dict , a_ :List[Any] , a_ :List[Any]) -> Optional[Any]:
lowercase :Dict = full_name.split('''conv_layers.''')[-1]
lowercase :List[str] = name.split('''.''')
lowercase :Optional[Any] = int(items[0])
lowercase :Union[str, Any] = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase :List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase :Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase :Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase :Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
else:
unused_weights.append(a_)
def lowerCamelCase (a_ :List[Any]) -> Any:
lowercase , lowercase :str = emb.weight.shape
lowercase :str = nn.Linear(a_ , a_ , bias=a_)
lowercase :Union[str, Any] = emb.weight.data
return lin_layer
def create_vocab_dict(dict_path: str) -> dict:
    with open(dict_path, '''r''', encoding='''utf-8''') as f:
        lines = f.readlines()
    words = [line.split(''' ''')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
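# Illustrative example (hypothetical dict file, not shipped with any checkpoint): a
# fairseq dictionary containing the two lines "hello 42" and "world 17" would produce
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}
# since only the first column (the token) is read and ids are assigned from 4 upward.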
@torch.no_grad()
def lowerCamelCase (a_ :Tuple , a_ :Any , a_ :Dict , a_ :str , a_ :str , a_ :str , a_ :Union[str, Any] , ) -> List[str]:
lowercase :List[str] = WavaVecaConfig.from_pretrained(a_)
lowercase :str = SpeechaTextaConfig.from_pretrained(
a_ , vocab_size=a_ , decoder_layers=a_ , do_stable_layer_norm=a_)
lowercase :int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a_ , return_attention_mask=a_ , )
lowercase , lowercase , lowercase :Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
lowercase :Optional[int] = model[0].eval()
# set weights for wav2vec2 encoder
lowercase :List[str] = WavaVecaModel(a_)
lowercase :Dict = recursively_load_weights_wavaveca(model.encoder , a_)
lowercase :int = SpeechaTextaForCausalLM(a_)
lowercase , lowercase :int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a_)
# set output linear layer
unexpected_keys.remove('''embed_out''')
lowercase :Dict = nn.Parameter(model.decoder.embed_out.detach())
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""")
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
lowercase :List[str] = SpeechEncoderDecoderModel(encoder=a_ , decoder=a_)
lowercase :List[str] = False
# add projection layer
lowercase :List[str] = nn.Parameter(projection_layer.weight)
lowercase :Optional[Any] = nn.Parameter(projection_layer.bias)
lowercase :str = create_vocab_dict(a_)
with open(os.path.join(a_ , '''vocab.json''') , '''w''') as fp:
json.dump(a_ , a_)
lowercase :Optional[int] = SpeechaTextaTokenizer(os.path.join(a_ , '''vocab.json'''))
tokenizer.save_pretrained(a_)
lowercase :Any = hf_wavavec.config.to_dict()
lowercase :int = tokenizer.pad_token_id
lowercase :Optional[Any] = tokenizer.bos_token_id
lowercase :Any = tokenizer.eos_token_id
lowercase :Union[str, Any] = '''speech_to_text_2'''
lowercase :Any = '''wav2vec2'''
lowercase :Dict = SpeechEncoderDecoderConfig.from_dict(a_)
hf_wavavec.save_pretrained(a_)
feature_extractor.save_pretrained(a_)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 172
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase = logging.get_logger(__name__)
def lowerCamelCase (a_ :str , a_ :Optional[int]) -> Union[str, Any]:
lowercase :List[str] = set()
lowercase :Dict = []
def parse_line(a_ :Dict):
for line in fp:
if isinstance(a_ , a_):
lowercase :Any = line.decode('''UTF-8''')
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' '''):
# process a single warning and move it to `selected_warnings`.
if len(a_) > 0:
lowercase :int = '''\n'''.join(a_)
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets):
selected_warnings.add(a_)
buffer.clear()
continue
else:
lowercase :Any = line.strip()
buffer.append(a_)
if from_gh:
for filename in os.listdir(a_):
lowercase :Optional[int] = os.path.join(a_ , a_)
if not os.path.isdir(a_):
# read the file
if filename != "warnings.txt":
continue
with open(a_) as fp:
parse_line(a_)
else:
try:
with zipfile.ZipFile(a_) as z:
for filename in z.namelist():
if not os.path.isdir(a_):
# read the file
if filename != "warnings.txt":
continue
with z.open(a_) as fp:
parse_line(a_)
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""")
return selected_warnings
def lowerCamelCase (a_ :Any , a_ :Optional[int]) -> Any:
lowercase :Tuple = set()
lowercase :Dict = [os.path.join(a_ , a_) for p in os.listdir(a_) if (p.endswith('''.zip''') or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a_ , a_))
return selected_warnings
if __name__ == "__main__":
def lowerCamelCase (a_ :List[Any]) -> Optional[Any]:
return values.split(''',''')
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 172
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : int = """convnextv2"""
def __init__(self : Tuple , __a : Any=3 , __a : List[Any]=4 , __a : List[Any]=4 , __a : str=None , __a : Union[str, Any]=None , __a : Any="gelu" , __a : str=0.02 , __a : str=1E-12 , __a : str=0.0 , __a : str=224 , __a : Optional[Any]=None , __a : Union[str, Any]=None , **__a : Tuple , ):
super().__init__(**__a )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_stages
UpperCAmelCase_ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
UpperCAmelCase_ = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = image_size
UpperCAmelCase_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 1
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Dict , A : int , A : int , A : int , A : Union[str, Any]=0.0 , A : Optional[int] = None , A : str = "geglu" , A : Optional[int] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : str = "layer_norm" , A : bool = False , ) ->Any:
super().__init__()
lowerCamelCase__ : int = only_cross_attention
lowerCamelCase__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
lowerCamelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase__ : Optional[Any] = AdaLayerNorm(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ : int = AdaLayerNormZero(A , A )
else:
lowerCamelCase__ : Dict = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Any = Attention(
query_dim=A , heads=A , dim_head=A , dropout=A , bias=A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase__ : Tuple = (
AdaLayerNorm(A , A )
if self.use_ada_layer_norm
else nn.LayerNorm(A , elementwise_affine=A )
)
lowerCamelCase__ : int = Attention(
query_dim=A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A , dim_head=A , dropout=A , bias=A , upcast_attention=A , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Tuple = None
# 3. Feed-forward
lowerCamelCase__ : Optional[int] = nn.LayerNorm(A , elementwise_affine=A )
lowerCamelCase__ : Union[str, Any] = FeedForward(A , dropout=A , activation_fn=A , final_dropout=A )
# let chunk size default to None
lowerCamelCase__ : str = None
lowerCamelCase__ : Tuple = 0
def __lowerCamelCase ( self : Any , A : Optional[int] , A : int ) ->List[str]:
# Sets chunk feed-forward
lowerCamelCase__ : List[Any] = chunk_size
lowerCamelCase__ : List[str] = dim
def __lowerCamelCase ( self : str , A : torch.FloatTensor , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Dict[str, Any] = None , A : Optional[torch.LongTensor] = None , ) ->Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCamelCase__ : Union[str, Any] = self.norma(A , A )
elif self.use_ada_layer_norm_zero:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.norma(
A , A , A , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase__ : List[str] = self.norma(A )
lowerCamelCase__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase__ : Any = self.attna(
A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A , **A , )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Any = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase__ : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase__ : int = (
self.norma(A , A ) if self.use_ada_layer_norm else self.norma(A )
)
lowerCamelCase__ : int = self.attna(
A , encoder_hidden_states=A , attention_mask=A , **A , )
lowerCamelCase__ : Any = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase__ : Union[str, Any] = self.norma(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
lowerCamelCase__ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase__ : Optional[int] = torch.cat(
[self.ff(A ) for hid_slice in norm_hidden_states.chunk(A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase__ : Optional[int] = self.ff(A )
if self.use_ada_layer_norm_zero:
lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase__ : List[Any] = ff_output + hidden_states
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : Optional[int] = None , A : int = 4 , A : float = 0.0 , A : str = "geglu" , A : bool = False , ) ->int:
super().__init__()
lowerCamelCase__ : List[Any] = int(dim * mult )
lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase__ : int = GELU(A , A )
if activation_fn == "gelu-approximate":
lowerCamelCase__ : Optional[int] = GELU(A , A , approximate='''tanh''' )
elif activation_fn == "geglu":
lowerCamelCase__ : Any = GEGLU(A , A )
elif activation_fn == "geglu-approximate":
lowerCamelCase__ : int = ApproximateGELU(A , A )
lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(A )
# project dropout
self.net.append(nn.Dropout(A ) )
# project out
self.net.append(nn.Linear(A , A ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(A ) )
def __lowerCamelCase ( self : Dict , A : List[Any] ) ->Optional[Any]:
for module in self.net:
lowerCamelCase__ : int = module(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int , A : str = "none" ) ->Optional[Any]:
super().__init__()
lowerCamelCase__ : List[Any] = nn.Linear(A , A )
lowerCamelCase__ : Any = approximate
def __lowerCamelCase ( self : List[str] , A : Tuple ) ->str:
if gate.device.type != "mps":
return F.gelu(A , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : List[str] , A : str ) ->Optional[int]:
lowerCamelCase__ : List[str] = self.proj(A )
lowerCamelCase__ : Optional[int] = self.gelu(A )
return hidden_states
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Tuple , A : int , A : int ) ->Dict:
super().__init__()
lowerCamelCase__ : Optional[Any] = nn.Linear(A , dim_out * 2 )
def __lowerCamelCase ( self : List[Any] , A : List[Any] ) ->Tuple:
if gate.device.type != "mps":
return F.gelu(A )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->Any:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.proj(A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(A )
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Optional[int] = nn.Linear(A , A )
def __lowerCamelCase ( self : Union[str, Any] , A : Dict ) ->Optional[Any]:
lowerCamelCase__ : List[str] = self.proj(A )
return x * torch.sigmoid(1.7_02 * x )
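        # The constant above comes from the sigmoid approximation of GELU
        # (Hendrycks & Gimpel): GELU(x) ≈ x * sigmoid(1.702 * x).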
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : int , A : Dict , A : Optional[Any] ) ->str:
super().__init__()
lowerCamelCase__ : List[str] = nn.Embedding(A , A )
lowerCamelCase__ : str = nn.SiLU()
lowerCamelCase__ : int = nn.Linear(A , embedding_dim * 2 )
lowerCamelCase__ : Optional[Any] = nn.LayerNorm(A , elementwise_affine=A )
def __lowerCamelCase ( self : int , A : Union[str, Any] , A : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(A ) ) )
lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(A , 2 )
lowerCamelCase__ : Any = self.norm(A ) * (1 + scale) + shift
return x
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : str , A : Optional[Any] , A : int ) ->str:
super().__init__()
lowerCamelCase__ : Union[str, Any] = CombinedTimestepLabelEmbeddings(A , A )
lowerCamelCase__ : int = nn.SiLU()
lowerCamelCase__ : List[str] = nn.Linear(A , 6 * embedding_dim , bias=A )
lowerCamelCase__ : str = nn.LayerNorm(A , elementwise_affine=A , eps=1e-6 )
def __lowerCamelCase ( self : List[str] , A : Any , A : List[Any] , A : Tuple , A : Dict=None ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.linear(self.silu(self.emb(A , A , hidden_dtype=A ) ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = emb.chunk(6 , dim=1 )
lowerCamelCase__ : List[Any] = self.norm(A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
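        # Sketch of the adaLN-Zero conditioning used above (as in DiT-style blocks): the
        # embedding is projected to six chunks; shift/scale modulate the normalized
        # activations as norm(x) * (1 + scale) + shift, and the gate terms rescale the
        # attention / MLP branch outputs before the residual additions.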
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , A : int , A : int , A : int , A : Optional[str] = None , A : float = 1e-5 ) ->Any:
super().__init__()
lowerCamelCase__ : int = num_groups
lowerCamelCase__ : List[str] = eps
if act_fn is None:
lowerCamelCase__ : Tuple = None
else:
lowerCamelCase__ : Dict = get_activation(A )
lowerCamelCase__ : Any = nn.Linear(A , out_dim * 2 )
def __lowerCamelCase ( self : List[str] , A : Optional[int] , A : str ) ->Tuple:
if self.act:
lowerCamelCase__ : Union[str, Any] = self.act(A )
lowerCamelCase__ : Optional[Any] = self.linear(A )
lowerCamelCase__ : Optional[Any] = emb[:, :, None, None]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1 )
lowerCamelCase__ : str = F.group_norm(A , self.num_groups , eps=self.eps )
lowerCamelCase__ : Dict = x * (1 + scale) + shift
return x
| 142
| 0
|
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class A__ ( lowerCamelCase__ ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''xlm-prophetnet'''
UpperCamelCase_ : int = ['''past_key_values''']
UpperCamelCase_ : Any = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[Union[str, Callable]] = "gelu" , lowerCAmelCase__ : Optional[int] = 3_0_5_2_2 , lowerCAmelCase__ : Optional[int] = 1_0_2_4 , lowerCAmelCase__ : Optional[int] = 4_0_9_6 , lowerCAmelCase__ : Optional[int] = 1_2 , lowerCAmelCase__ : Optional[int] = 1_6 , lowerCAmelCase__ : Optional[int] = 4_0_9_6 , lowerCAmelCase__ : Optional[int] = 1_2 , lowerCAmelCase__ : Optional[int] = 1_6 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[int] = 5_1_2 , lowerCAmelCase__ : Optional[float] = 0.02 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 2 , lowerCAmelCase__ : Optional[int] = 3_2 , lowerCAmelCase__ : Optional[int] = 1_2_8 , lowerCAmelCase__ : Optional[bool] = False , lowerCAmelCase__ : Optional[float] = 0.0 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 2 , **lowerCAmelCase__ : int , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : str = encoder_ffn_dim
_UpperCAmelCase : Dict = num_encoder_layers
_UpperCAmelCase : Dict = num_encoder_attention_heads
_UpperCAmelCase : int = decoder_ffn_dim
_UpperCAmelCase : List[str] = num_decoder_layers
_UpperCAmelCase : Optional[int] = num_decoder_attention_heads
_UpperCAmelCase : Optional[Any] = max_position_embeddings
_UpperCAmelCase : List[Any] = init_std # Normal(0, this parameter)
_UpperCAmelCase : Optional[Any] = activation_function
# parameters for xlmprophetnet
_UpperCAmelCase : str = ngram
_UpperCAmelCase : Dict = num_buckets
_UpperCAmelCase : str = relative_max_distance
_UpperCAmelCase : Optional[Any] = disable_ngram_loss
_UpperCAmelCase : str = eps
# 3 Types of Dropout
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Optional[Any] = activation_dropout
_UpperCAmelCase : List[str] = dropout
_UpperCAmelCase : Tuple = use_cache
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , add_cross_attention=__snake_case , decoder_start_token_id=__snake_case , **__snake_case , )
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 357
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
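# Illustrative check (worked out by hand, not a doctest from the original module):
#   fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "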
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class _SCREAMING_SNAKE_CASE:
def __init__( self ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = []
__SCREAMING_SNAKE_CASE :Tuple = 0
__SCREAMING_SNAKE_CASE :Optional[Any] = 0
def _UpperCamelCase ( self ) -> bool:
"""simple docstring"""
return self.head == self.tail
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
self.data.append(_a )
__SCREAMING_SNAKE_CASE :Tuple = self.tail + 1
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.data[self.head]
__SCREAMING_SNAKE_CASE :Optional[Any] = self.head + 1
return ret
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.tail - self.head
def _UpperCamelCase ( self ) -> None:
"""simple docstring"""
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class _SCREAMING_SNAKE_CASE:
def __init__( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = data
__SCREAMING_SNAKE_CASE :Tuple = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = None
__SCREAMING_SNAKE_CASE :List[Any] = 1
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return self.data
def _UpperCamelCase ( self ) -> MyNode | None:
"""simple docstring"""
return self.left
def _UpperCamelCase ( self ) -> MyNode | None:
"""simple docstring"""
return self.right
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.height
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = data
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = node
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = node
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = height
def __lowerCamelCase ( a_ : MyNode | None ) -> int:
if node is None:
return 0
return node.get_height()
def __lowerCamelCase ( a_ : int , a_ : int ) -> int:
if a > b:
return a
return b
def __lowerCamelCase ( a_ : MyNode ) -> MyNode:
print('''left rotation node:''' , node.get_data() )
__SCREAMING_SNAKE_CASE :Optional[Any] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_UpperCamelCase )
__SCREAMING_SNAKE_CASE :List[str] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_UpperCamelCase )
__SCREAMING_SNAKE_CASE :List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_UpperCamelCase )
return ret
def __lowerCamelCase ( a_ : MyNode ) -> MyNode:
print('''right rotation node:''' , node.get_data() )
__SCREAMING_SNAKE_CASE :Dict = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_UpperCamelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_UpperCamelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_UpperCamelCase )
return ret
def __lowerCamelCase ( a_ : MyNode ) -> MyNode:
__SCREAMING_SNAKE_CASE :Tuple = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_UpperCamelCase ) )
return right_rotation(_UpperCamelCase )
def __lowerCamelCase ( a_ : MyNode ) -> MyNode:
__SCREAMING_SNAKE_CASE :Optional[Any] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_UpperCamelCase ) )
return left_rotation(_UpperCamelCase )
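# Sketch of the two double rotations above: lr_rotation handles the "left-right" case,
# where the offending insertion landed in the right subtree of the left child, so a
# single right rotation would not restore balance; the left child is rotated left
# first, then the unbalanced node is rotated right. rl_rotation is the mirror image
# for the "right-left" case.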
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # rebalance after the deletion
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    new_height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(new_height)
    return root
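

# A tiny invariant check (a sketch added for illustration, not part of the original algorithm):
# after any sequence of insert_node/del_node calls, every node's child subtrees should differ in
# height by at most one. Only the helpers defined above (MyNode, get_height) are assumed.
def is_balanced(node: MyNode | None) -> bool:
    """Return True if the subtree rooted at ``node`` satisfies the AVL balance property."""
    if node is None:
        return True
    if abs(get_height(node.get_left()) - get_height(node.get_right())) > 1:
        return False
    return is_balanced(node.get_left()) and is_balanced(node.get_right())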
class AVLtree:
    """A self-balancing binary search tree (AVL tree)."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for _ in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
"""Max-pooling and average-pooling of square matrices, implemented with NumPy."""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over ``arr`` with the given stride, keeping the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over ``arr`` with the given stride, keeping the (truncated) average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
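

# A small self-contained demo (a sketch added for illustration; the script's own __main__ block
# below pools an image loaded from the placeholder "path_to_image"): a 4x4 matrix pooled with a
# 2x2 window and stride 2.
if __name__ == "__main__":
    _demo = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    print(maxpooling(_demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(_demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]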
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n'
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
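

# A condensed sketch (added for illustration; apart from TRANSFORMERS_OFFLINE and the tiny test
# checkpoint, the names below are my own) of the pattern the tests above rely on: the variable has
# to be set in the environment of a fresh Python process, because transformers reads it at import
# time, which is why each test shells out via subprocess instead of setting it in-process.
if __name__ == "__main__":
    import os

    snippet = (
        "from transformers import BertConfig\n"
        "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')\n"
        "print('success')\n"
    )
    offline_env = dict(os.environ, TRANSFORMERS_OFFLINE="1")  # cached files only, no network access
    proc = subprocess.run([sys.executable, "-c", snippet], env=offline_env, capture_output=True, check=False)
    print(proc.returncode, proc.stdout.decode())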