Dataset schema (one row per example):

| column | type | range |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
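# Added usage note (an assumption about the local test layout, not part of the original
# file): diffusers gates @nightly-marked tests behind the RUN_NIGHTLY environment flag,
# so a local run looks roughly like:
#   RUN_NIGHTLY=1 python -m pytest -k "inpaint" tests/pipelines/stable_diffusion/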
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via"
                " `num_attention_heads` because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
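# Added usage sketch (an illustration, not part of the original file; the tiny
# configuration below is an assumption chosen to keep init_weights() cheap):
# import jax
# from diffusers import FlaxUNet2DConditionModel
#
# model = FlaxUNet2DConditionModel(
#     sample_size=8,
#     block_out_channels=(32, 64),
#     down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
#     up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
#     cross_attention_dim=32,
# )
# params = model.init_weights(jax.random.PRNGKey(0))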
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
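# Added usage sketch (an illustration, not part of the original file; downloading the
# public microsoft/layoutxlm-base checkpoint requires network access and OCR deps):
# from PIL import Image
# from transformers import LayoutXLMProcessor
#
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# print(encoding.keys())  # input_ids, bbox, attention_mask, image, ...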
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d usable as a modular exponent
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
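# Added illustration (not part of the original file): the loop above writes n - 1 as
# d * 2**exp with d odd; one Miller-Rabin round then inspects a**(d * 2**r) mod n.
# Python's built-in pow(a, e, n) stands in for bin_exp_mod here.
def _decompose(m):
    d, exp = m - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    return d, exp


_d, _exp = _decompose(13)
assert (_d, _exp) == (3, 2)  # 12 = 3 * 2**2
assert pow(2, _d * 2, 13) == 13 - 1  # base 2 reaches n - 1, so 13 survives this round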
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
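# Added offline illustration (not part of the original file; the story dict below is
# fabricated): each story is rendered as one markdown list item via str.format on its
# title/url keys.
sample_story = {"title": "Example story", "url": "https://example.com"}
print("* [{title}]({url})".format(**sample_story))  # -> * [Example story](https://example.com)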
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
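# Added usage note (the file name is an assumption, not part of the original file):
# these checks expect one Python process per device, e.g. via the accelerate CLI:
#   accelerate launch --num_processes 2 test_operations.py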
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
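# Added round-trip illustration (not part of the original file): enciphering and then
# deciphering with the same keyword recovers the upper-cased message, since characters
# outside the map pass through unchanged.
_demo_map = create_cipher_map("Keyword")
_demo_msg = "Meet me at noon"
assert decipher(encipher(_demo_msg, _demo_map), _demo_map) == _demo_msg.upper()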
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='unispeech'
def __init__(self , a_=32 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="group" , a_="gelu" , a_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(10, 3, 3, 3, 3, 2, 2) , a_=False , a_=1_28 , a_=16 , a_=False , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_=3_20 , a_=2 , a_=0.1 , a_=1_00 , a_=2_56 , a_=2_56 , a_=0.1 , a_="mean" , a_=False , a_=False , a_=2_56 , a_=80 , a_=0 , a_=1 , a_=2 , a_=0.5 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
__snake_case : Union[str, Any] = hidden_size
__snake_case : List[str] = feat_extract_norm
__snake_case : Optional[Any] = feat_extract_activation
__snake_case : str = list(a_ )
__snake_case : Dict = list(a_ )
__snake_case : Any = list(a_ )
__snake_case : Tuple = conv_bias
__snake_case : str = num_conv_pos_embeddings
__snake_case : List[Any] = num_conv_pos_embedding_groups
__snake_case : Tuple = len(self.conv_dim )
__snake_case : Any = num_hidden_layers
__snake_case : Dict = intermediate_size
__snake_case : Optional[Any] = hidden_act
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Tuple = hidden_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : List[str] = activation_dropout
__snake_case : str = feat_proj_dropout
__snake_case : Optional[Any] = final_dropout
__snake_case : Dict = layerdrop
__snake_case : str = layer_norm_eps
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[int] = num_ctc_classes
__snake_case : int = vocab_size
__snake_case : Tuple = do_stable_layer_norm
__snake_case : str = use_weighted_layer_sum
__snake_case : str = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Dict = apply_spec_augment
__snake_case : Optional[int] = mask_time_prob
__snake_case : Tuple = mask_time_length
__snake_case : Any = mask_time_min_masks
__snake_case : int = mask_feature_prob
__snake_case : Optional[Any] = mask_feature_length
__snake_case : Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case : Dict = num_codevectors_per_group
__snake_case : int = num_codevector_groups
__snake_case : Optional[Any] = contrastive_logits_temperature
__snake_case : str = feat_quantizer_dropout
__snake_case : Union[str, Any] = num_negatives
__snake_case : int = codevector_dim
__snake_case : Any = proj_codevector_dim
__snake_case : Optional[int] = diversity_loss_weight
# ctc loss
__snake_case : Any = ctc_loss_reduction
__snake_case : Optional[Any] = ctc_zero_infinity
# pretraining loss
__snake_case : List[str] = replace_prob
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
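# Added usage sketch (not part of the original file): the default config can be inspected
# offline; inputs_to_logits_ratio multiplies the conv strides (5 * 2**6 = 320 by default).
# from transformers import UniSpeechConfig
#
# config = UniSpeechConfig()
# print(config.inputs_to_logits_ratio)  # 320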
"""GIT model configuration"""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type"
                f" {cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
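# Added usage sketch (not part of the original file): composing the text and vision
# configs is offline-friendly; nothing is downloaded here.
# from transformers import GitConfig
#
# config = GitConfig()
# print(config.vision_config.hidden_size)  # 768 with the defaults above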
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
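# Added CLI usage sketch (the script file name is an assumption, not part of the original
# file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc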
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
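# Added usage sketch (an illustration, not part of the original file; `models`,
# `batch_sizes` and `sequence_lengths` are assumed fields of the parent
# BenchmarkArguments dataclass):
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.device, args.n_gpu)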
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
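# Added check (not part of the original file): the integral definition agrees with the
# factorial, e.g. Gamma(5) = 4! = 24.
assert abs(gamma(5) - 24.0) < 1e-6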
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
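# Added illustration (not part of the original file): find_labels inspects a model
# class's forward signature for label-like arguments, which is what the tests above
# assert.
# from transformers import BertForSequenceClassification
# from transformers.utils import find_labels
#
# print(find_labels(BertForSequenceClassification))  # ['labels']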
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Tuple=2 , snake_case_ : List[Any]=3 , snake_case_ : Optional[int]=4 , snake_case_ : Any=2 , snake_case_ : Union[str, Any]=7 , snake_case_ : Dict=True , snake_case_ : Optional[Any]=True , snake_case_ : Union[str, Any]=True , snake_case_ : int=True , snake_case_ : List[Any]=99 , snake_case_ : List[Any]=36 , snake_case_ : List[Any]=2 , snake_case_ : str=4 , snake_case_ : int=37 , snake_case_ : int="gelu" , snake_case_ : Any=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : int=512 , snake_case_ : Union[str, Any]=16 , snake_case_ : Optional[Any]=2 , snake_case_ : Tuple=0.02 , snake_case_ : List[str]=6 , snake_case_ : Dict=6 , snake_case_ : Optional[Any]=3 , snake_case_ : str=4 , snake_case_ : Union[str, Any]=None , snake_case_ : Union[str, Any]=1_000 , ) -> int:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = patch_size
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = coordinate_size
A__ = shape_size
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A__ = text_seq_length
A__ = (image_size // patch_size) ** 2 + 1
A__ = self.text_seq_length + self.image_seq_length
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
A__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ = bbox[i, j, 3]
A__ = bbox[i, j, 1]
A__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ = bbox[i, j, 2]
A__ = bbox[i, j, 0]
A__ = tmp_coordinate
A__ = tf.constant(snake_case_ )
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.text_seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __magic_name__ ( self : List[str] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Dict ) -> List[str]:
'''simple docstring'''
A__ = TFLayoutLMvaModel(config=snake_case_ )
# text + image
A__ = model(snake_case_ , pixel_values=snake_case_ , training=snake_case_ )
A__ = model(
snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , training=snake_case_ , )
A__ = model(snake_case_ , bbox=snake_case_ , pixel_values=snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A__ = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A__ = model({"pixel_values": pixel_values} , training=snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase_ ( nn.Module ):
def __init__( self : Optional[int] , snake_case_ : int = 16 , snake_case_ : int = 88 , snake_case_ : Optional[int] = None , snake_case_ : int = 1 , snake_case_ : float = 0.0 , snake_case_ : int = 32 , snake_case_ : Optional[int] = None , snake_case_ : bool = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : str = "geglu" , snake_case_ : Optional[int] = None , ) -> str:
'''simple docstring'''
super().__init__()
A__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case_ , attention_head_dim=snake_case_ , in_channels=snake_case_ , num_layers=snake_case_ , dropout=snake_case_ , norm_num_groups=snake_case_ , cross_attention_dim=snake_case_ , attention_bias=snake_case_ , sample_size=snake_case_ , num_vector_embeds=snake_case_ , activation_fn=snake_case_ , num_embeds_ada_norm=snake_case_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A__ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A__ = [1, 0]
def __magic_name__ ( self : Dict , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Any=None , snake_case_ : int=None , snake_case_ : Union[str, Any]=None , snake_case_ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
A__ = hidden_states
A__ = []
A__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A__ = self.transformer_index_for_condition[i]
A__ = self.transformers[transformer_index](
snake_case_ , encoder_hidden_states=snake_case_ , timestep=snake_case_ , cross_attention_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case_ )
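# A small sketch of the mixing rule above (hypothetical constructor values and
# shapes, shown only for illustration): each transformer sees its own slice of the
# concatenated conditions, the two residuals are blended with `mix_ratio`, and the
# original hidden states are added back at the end.
#
#   import torch
#
#   model = DualTransformer2DModel(in_channels=32, sample_size=8, cross_attention_dim=768)
#   hidden = torch.randn(1, 32, 8, 8)
#   conditions = torch.randn(1, 77 + 257, 768)  # text tokens followed by image tokens
#   out = model(hidden, conditions, return_dict=False)[0]  # same shape as `hidden`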
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index of `target`, or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of `target` in a sorted list, or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of `target` in a sorted list, or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_iterative = ite_ternary_search(collection, target)
    result_recursive = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_iterative != -1:
        print(f"Iterative search: {target} found at position {result_iterative}")
        print(f"Recursive search: {target} found at position {result_recursive}")
    else:
        print("Not found")
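# Minimal sanity-check sketch (illustrative values, not from the original file):
# on the sorted list [0, 2, 4, ..., 98], the value 42 sits at index 21, and a
# missing value yields -1. Because the list is short relative to `precision`,
# both calls fall through to `lin_search`.
#
#   sample = list(range(0, 100, 2))
#   assert ite_ternary_search(sample, 42) == 21
#   assert rec_ternary_search(0, len(sample) - 1, sample, 42) == 21
#   assert ite_ternary_search(sample, 43) == -1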
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """
    Args:
        sample (`jnp.ndarray`): the denoised sample output by the model.
    """

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )

            up_blocks.append(up_block)
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
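# A minimal usage sketch (illustrative only; the tiny sample_size and
# block_out_channels below are hypothetical values chosen to keep the example fast):
# initialize parameters with `init_weights`, then run one denoising forward pass.
#
#   import jax
#   import jax.numpy as jnp
#
#   unet = FlaxUNet2DConditionModel(sample_size=8, block_out_channels=(32, 64, 64, 64))
#   params = unet.init_weights(jax.random.PRNGKey(0))
#   sample = jnp.zeros((1, unet.in_channels, 8, 8))
#   timesteps = jnp.array([10], dtype=jnp.int32)
#   context = jnp.zeros((1, 77, unet.cross_attention_dim))
#   out = unet.apply({"params": params}, sample, timesteps, context).sample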
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if gpta_config_file == "":
A__ = GPTaConfig()
else:
A__ = GPTaConfig.from_json_file(UpperCamelCase__ )
A__ = GPTaModel(UpperCamelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
A__ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
A__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__lowerCamelCase = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
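# Example invocation (hypothetical paths, shown for illustration only):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Omitting --gpt2_config_file falls back to the default GPT2Config.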
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize the accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
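# Example launch (hypothetical dataset path; `accelerate launch` handles process and
# device setup, so the same script runs on CPU, single GPU, or multi-GPU):
#
#   accelerate launch cv_example.py --data_dir /path/to/pet_images --with_tracking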
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs by links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
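# Illustration of the pattern machinery (hypothetical file contents): calling
# update_version_in_file("src/diffusers/__init__.py", "0.18.1", "init") substitutes
# VERSION into the "init" replacement template and rewrites a line like
# `__version__ = "0.18.0.dev0"` to `__version__ = "0.18.1"` in place.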
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
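# Worked example: binary_or(25, 32) -> '0b111001'. 25 is 0b011001 and 32 is
# 0b100000 once both strings are zero-filled to the same width; OR-ing them
# column by column gives 111001 (i.e. 57 in decimal, which equals 25 | 32).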
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps language-modeling sequences; samples are (token_ids, length) pairs."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into several sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a too high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus (only on the master process)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
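# Sketch of the splitting behaviour in remove_long_sequences (hypothetical ids):
# with max_model_input_size=6, a 10-token sequence [CLS, a, b, c, d, e, f, g, h, SEP]
# is cut into chunks of at most 4 tokens ([CLS, a, b, c], [d, e, f, g], [h, SEP]),
# and each chunk is re-wrapped so it starts with CLS and ends with SEP before being
# appended back, so every resulting sequence fits within the model input size.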
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
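# Minimal usage sketch (the checkpoint name is an example from the ViLT family and
# is shown only for illustration):
#
#   from transformers import ViltProcessor
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
#   # `inputs` now holds input_ids/attention_mask from the tokenizer plus
#   # pixel_values/pixel_mask from the image processor.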
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : CommonSchedulerState
# setable values
lowerCamelCase : jnp.ndarray
lowerCamelCase : jnp.ndarray
lowerCamelCase : Optional[int] = None
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return cls(common=SCREAMING_SNAKE_CASE_ , init_noise_sigma=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
@dataclass
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : DDPMSchedulerState
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase : jnp.dtype
@property
def lowercase_ ( self ) -> Any:
return True
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
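    # A minimal usage sketch for the scheduler above (assuming this is diffusers'
    # FlaxDDPMScheduler; `model` is a hypothetical denoising network):
    #
    #   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    #   state = scheduler.create_state()
    #   state = scheduler.set_timesteps(state, num_inference_steps=50)
    #   key = jax.random.PRNGKey(0)
    #   for t in state.timesteps:
    #       noise_pred = model(sample, t)
    #       sample, state = scheduler.step(state, noise_pred, t, sample, key=key, return_dict=False)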
"""Count inversions in an array: pairs (i, j) with i < j and arr[i] > arr[j].

Implements a brute-force O(n^2) count and an O(n log n) divide-and-conquer
count based on merge sort.
"""


def count_inversions_bf(arr):
    """Count inversions by checking every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with merge sort: O(n log n). Returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs p[i] > q[j] along the way."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
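# Quick sanity check of the divide-and-conquer count (values chosen by hand):
# count_inversions_recursive([3, 1, 2]) returns ([1, 2, 3], 2),
# since the only inversions are (3, 1) and (3, 2).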
"""Compute the kinetic energy of a moving body."""


def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Kinetic energy E = 1/2 * m * v^2 (speed is taken as |velocity|).

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(0, 10)
    0.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
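# Standalone usage sketch outside pytest ("data.parquet" is a placeholder path):
#
#   from datasets.io.parquet import ParquetDatasetReader
#   ds = ParquetDatasetReader("data.parquet", cache_dir="./cache").read()
#   print(ds.num_rows, ds.column_names)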
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.

    :param completion_id: an optional completion ID so we can match
        the results later even if execution finishes asynchronously.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions so the executed program cannot
    interfere with the test (e.g. killing processes or removing files).

    WARNING: this is not a security sandbox; untrusted code should still be
    run inside a proper sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
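# Usage sketch (the candidate program below is a hypothetical example):
#
#   program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#   print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
#   # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}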
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring: (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """
    Iteratively compute the power tower base ** base ** ... ** base (`height` levels)
    modulo 10 ** digits, i.e. the last `digits` digits of the hyperexponentiation.
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
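# Sanity check for the helper (hand-checked values):
# _modexpt(2, 10, 1000) == 24, matching Python's built-in pow(2, 10, 1000).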
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
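# To run just this suite (the test file path is assumed, following the usual
# transformers repository layout):
#   python -m pytest tests/models/bit/test_modeling_bit.py -k "BitModelTest"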
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
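# Note on the scaling above: with drop_prob = 0.1, each sample's residual branch
# is zeroed with probability 0.1 and the surviving branches are divided by
# keep_prob = 0.9, so the expected value of the output matches the input scale.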
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings from pixel values."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor in shape [B, C, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # the residual is subtracted here, so the block adds (pool(x) - x) on top of x
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)

        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
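# Inference sketch (checkpoint name taken from the docstring constants above;
# `image` is assumed to be a PIL image you already loaded):
#
#   from transformers import PoolFormerImageProcessor
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_label = model(**inputs).logits.argmax(-1).item()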
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase = parser.parse_args()
lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
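# Example invocation (the script filename is a placeholder):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted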
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits only when the vocabulary is small enough
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
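# Example invocation (a sketch, assuming a plain-text corpus with one example per line;
# the script name is illustrative):
#   python binarize_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized
# The resulting pickle holds one numpy array of token ids per input line.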
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
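# To run this test file locally (a sketch; the path follows the usual transformers test
# layout and is not confirmed by this snippet). Slow tests need RUN_SLOW=1 and Hub access:
#   RUN_SLOW=1 python -m pytest tests/models/biogpt/test_modeling_biogpt.py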
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the original class name in this snippet is unrecoverable; a generic
    # stand-in name is used here.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
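# Minimal usage sketch (the processor class name above is a stand-in; PIL import and
# image path are illustrative):
# from PIL import Image
# processor = SimpleImageProcessor()
# image = Image.open("example.jpg")
# batch = processor(image, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop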
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        # serialize the torch dtype as a plain string, e.g. torch.float16 -> "float16"
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
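# Minimal usage sketch (assumes `transformers` and `bitsandbytes` are installed;
# the model name is illustrative):
# from transformers import AutoModelForCausalLM
# quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
# model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)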
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
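# Why this works: for two's-complement integers, the sign bit of `num1 ^ num2` is the
# XOR of the two operands' sign bits, so the result is negative exactly when the
# operands have opposite signs, e.g. different_signs(3, -7) -> True,
# different_signs(3, 7) -> False.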
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = 10
__UpperCamelCase = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
__UpperCamelCase = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(__A ) ),
} ,features=__A ,)
return dataset
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__A )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="""session""" )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="""session""" )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
import gzip
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
__UpperCamelCase = bytes(__A ,"""utf-8""" )
with gzip.open(__A ,"""wb""" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="""session""" )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
import tarfile
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__A ,"""w""" ) as f:
f.add(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
import lzma
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
__UpperCamelCase = bytes(__A ,"""utf-8""" )
with lzma.open(__A ,"""wb""" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
import zipfile
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
__UpperCamelCase = bytes(__A ,"""utf-8""" )
with zstd.open(__A ,"""wb""" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
__UpperCamelCase = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__A ,"""w""" ) as f:
f.write(__A )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _lowercase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = datasets.Dataset.from_dict(__A )
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__A )
return path
@pytest.fixture(scope="""session""" )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__A ,"""w""" ,newline="""""" ) as f:
__UpperCamelCase = csv.DictWriter(__A ,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__A ,"""w""" ,newline="""""" ) as f:
__UpperCamelCase = csv.DictWriter(__A ,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
import bza
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__A ,"""rb""" ) as f:
__UpperCamelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__A ,"""wb""" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(__A ) )
f.write(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(csv_path.replace(""".csv""" ,""".CSV""" ) ) )
f.write(__A ,arcname=os.path.basename(csva_path.replace(""".csv""" ,""".CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="""session""" )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__UpperCamelCase = {"""data""": DATA}
with open(__A ,"""w""" ) as f:
json.dump(__A ,__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__UpperCamelCase = {"""data""": DATA_DICT_OF_LISTS}
with open(__A ,"""w""" ) as f:
json.dump(__A ,__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__A ,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(__A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__A ,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(__A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__A ,"""w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__A ,"""w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
import gzip
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__A ,"""rb""" ) as orig_file:
with gzip.open(__A ,"""wb""" ) as zipped_file:
zipped_file.writelines(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
import gzip
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__A ,"""rb""" ) as orig_file:
with gzip.open(__A ,"""wb""" ) as zipped_file:
zipped_file.writelines(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(__A ) )
f.write(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.join("""nested""" ,os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__A ,"""w""" ) as f:
f.add(__A ,arcname=os.path.basename(__A ) )
f.add(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__A ,"""w""" ) as f:
f.add(__A ,arcname=os.path.join("""nested""" ,os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = ["""0""", """1""", """2""", """3"""]
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__A ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = ["""0""", """1""", """2""", """3"""]
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__A ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = ["""0""", """1""", """2""", """3"""]
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__A ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(__A ) )
f.write(__A ,arcname=os.path.basename(__A ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
f.write(__A ,arcname=os.path.join("""main_dir""" ,os.path.basename(__A ) ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__A ,arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
__UpperCamelCase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__A ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(__A )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( ):
'''simple docstring'''
return os.path.join("""tests""" ,"""features""" ,"""data""" ,"""test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _lowercase ( ):
'''simple docstring'''
return os.path.join("""tests""" ,"""features""" ,"""data""" ,"""test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__A ,"""w""" ) as f:
f.write(__A ,arcname=os.path.basename(__A ) )
f.write(__A ,arcname=os.path.basename(__A ).replace(""".jpg""" ,"""2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" ,"""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" ,"""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
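# Usage sketch: pytest injects these session-scoped fixtures by parameter name, so a
# test simply declares the fixture it needs (fixture and test names here are illustrative):
#   def test_text_loading(text_file):
#       assert open(text_file).read().strip() != ""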
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
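# Minimal usage sketch (checkpoint name taken from the vocab map above):
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# encoded = tokenizer("Hello world", "Second segment")
# print(encoded["input_ids"], encoded["token_type_ids"])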
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the iteration step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment with four segments forming the Koch kink."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake outline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
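
# Sanity-check sketch (an illustrative addition, not part of the original
# script): each iteration replaces every segment with four, so after n
# iterations the curve has 3 * 4**n segments and therefore 3 * 4**n + 1 points.
def _check_segment_growth(max_steps: int = 4) -> None:
    for n in range(max_steps):
        assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1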
| 303
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False,
        adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
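
# Sketch (illustrative): how a dynamic-axes mapping like the one above keeps
# batch size and sequence length symbolic in an exported graph. The export
# call is commented out and its `model`/`dummy_input` names are assumptions.
from collections import OrderedDict as _OrderedDict

_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_dynamic_axes = _OrderedDict(
    [("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)]
)
# torch.onnx.export(model, (dummy_input,), "xmod.onnx",
#                   input_names=list(_onnx_dynamic_axes),
#                   dynamic_axes=dict(_onnx_dynamic_axes))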
| 165
|
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime; adjust its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
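
# Cross-check sketch (an illustrative addition, not part of the original
# solution): verify the sieve against a brute-force totient for a small limit.
from math import gcd


def _phi_bruteforce(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


if __name__ == "__main__":
    assert solution(50) == sum(_phi_bruteforce(n) for n in range(2, 51))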
| 165
| 1
|
"""simple docstring"""
def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int:
'''simple docstring'''
return int(input_a == input_a == 0 )
def __lowercase ( ) ->None:
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
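
# Sketch (illustrative, not part of the original script): NOR is functionally
# complete, so NOT and OR fall out of it directly.
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


if __name__ == "__main__":
    assert [not_gate(0), not_gate(1)] == [1, 0]
    assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]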
| 179
|
"""simple docstring"""
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
if n == 0:
return 0
__A : int = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + naive_cut_rod_recursive(n - i ,snake_case_ ) )
return max_revue
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->int:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
__A : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_ ,snake_case_ ,snake_case_ )
def __lowercase ( snake_case_ : int ,snake_case_ : list ,snake_case_ : list ) ->Any:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__A : Any = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + _top_down_cut_rod_recursive(n - i ,snake_case_ ,snake_case_ ) ,)
__A : Any = max_revenue
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__A : Union[str, Any] = [float('''-inf''' ) for _ in range(n + 1 )]
__A : List[Any] = 0
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max_rev[i]
for j in range(1 ,i + 1 ):
__A : str = max(snake_case_ ,prices[j - 1] + max_rev[i - j] )
__A : List[str] = max_revenue_i
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Union[str, Any]:
'''simple docstring'''
if n < 0:
__A : Union[str, Any] = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
__A : List[Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(snake_case_ )}"""
)
raise ValueError(snake_case_ )
def __lowercase ( ) ->str:
'''simple docstring'''
__A : Any = [6, 10, 12, 15, 20, 23]
__A : Union[str, Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__A : str = 36
__A : Any = top_down_cut_rod(snake_case_ ,snake_case_ )
__A : Any = bottom_up_cut_rod(snake_case_ ,snake_case_ )
__A : Optional[Any] = naive_cut_rod_recursive(snake_case_ ,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
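
# Sketch (illustrative extension): recover the actual cut lengths by also
# recording the best first-piece choice during the bottom-up pass.
def bottom_up_with_cuts(n: int, prices: list) -> tuple:
    max_rev = [0] * (n + 1)
    first_cut = [0] * (n + 1)
    for i in range(1, n + 1):
        best = float("-inf")
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > best:
                best = prices[j - 1] + max_rev[i - j]
                first_cut[i] = j
        max_rev[i] = best
    cuts = []
    while n > 0:
        cuts.append(first_cut[n])
        n -= first_cut[n]
    return max_rev[-1], cuts


if __name__ == "__main__":
    revenue, cuts = bottom_up_with_cuts(6, [6, 10, 12, 15, 20, 23])
    assert (revenue, cuts) == (36, [1, 1, 1, 1, 1, 1])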
| 179
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a = 3_84
_a = 7
if "tiny" in model_name:
_a = 96
_a = (2, 2, 6, 2)
_a = (3, 6, 12, 24)
elif "small" in model_name:
_a = 96
_a = (2, 2, 18, 2)
_a = (3, 6, 12, 24)
elif "base" in model_name:
_a = 1_28
_a = (2, 2, 18, 2)
_a = (4, 8, 16, 32)
_a = 12
_a = 5_12
elif "large" in model_name:
_a = 1_92
_a = (2, 2, 18, 2)
_a = (6, 12, 24, 48)
_a = 12
_a = 7_68
# set label information
_a = 1_50
_a = 'huggingface/label-files'
_a = 'ade20k-id2label.json'
_a = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase, repo_type='''dataset''' ), '''r''' ) )
_a = {int(k): v for k, v in idalabel.items()}
_a = {v: k for k, v in idalabel.items()}
_a = SwinConfig(
embed_dim=_UpperCAmelCase, depths=_UpperCAmelCase, num_heads=_UpperCAmelCase, window_size=_UpperCAmelCase, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
_a = UperNetConfig(
backbone_config=_UpperCAmelCase, auxiliary_in_channels=_UpperCAmelCase, num_labels=_UpperCAmelCase, idalabel=_UpperCAmelCase, labelaid=_UpperCAmelCase, )
return config
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_a = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[str], _lowerCAmelCase : str ):
"""simple docstring"""
_a = dct.pop(_UpperCAmelCase )
_a = val
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
_a = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:dim, :]
_a = in_proj_bias[: dim]
_a = in_proj_weight[
dim : dim * 2, :
]
_a = in_proj_bias[
dim : dim * 2
]
_a = in_proj_weight[
-dim :, :
]
_a = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_a = x.shape
_a = x.reshape(_UpperCAmelCase, 4, in_channel // 4 )
_a = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(_UpperCAmelCase, _UpperCAmelCase )
return x
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_a = x.shape
_a = x.reshape(_UpperCAmelCase, in_channel // 4, 4 )
_a = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(_UpperCAmelCase, _UpperCAmelCase )
return x
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = x.shape[0]
_a = x.reshape(4, in_channel // 4 )
_a = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(_UpperCAmelCase )
return x
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a = x.shape[0]
_a = x.reshape(in_channel // 4, 4 )
_a = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(_UpperCAmelCase )
return x
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict ):
"""simple docstring"""
_a = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
_a = model_name_to_url[model_name]
_a = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='''cpu''', file_name=_UpperCAmelCase )[
'state_dict'
]
for name, param in state_dict.items():
print(_UpperCAmelCase, param.shape )
_a = get_upernet_config(_UpperCAmelCase )
_a = UperNetForSemanticSegmentation(_UpperCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_a = state_dict.pop(_UpperCAmelCase )
if "bn" in key:
_a = key.replace('''bn''', '''batch_norm''' )
_a = val
# rename keys
_a = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_a = reverse_correct_unfold_reduction_order(_UpperCAmelCase )
if "norm" in key:
_a = reverse_correct_unfold_norm_order(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# verify on image
_a = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_a = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw ).convert('''RGB''' )
_a = SegformerImageProcessor()
_a = processor(_UpperCAmelCase, return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_a = model(_UpperCAmelCase )
_a = outputs.logits
print(logits.shape )
print('''First values of logits:''', logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_a = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
_a = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
_a = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
_a = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('''Logits:''', outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], _UpperCAmelCase, atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
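
# Sketch (illustrative, standalone): the fused-QKV split that `read_in_q_k_v`
# performs on real checkpoints, shown on dummy tensors. `dim` mirrors the
# per-stage hidden size; all names here are hypothetical.
import torch as _torch

_dim = 8
_in_proj_weight = _torch.randn(3 * _dim, _dim)  # stacked [q; k; v] projection
_in_proj_bias = _torch.randn(3 * _dim)
_query_w = _in_proj_weight[:_dim, :]
_key_w = _in_proj_weight[_dim : _dim * 2, :]
_value_w = _in_proj_weight[-_dim:, :]
_query_b, _key_b, _value_b = _in_proj_bias[:_dim], _in_proj_bias[_dim : _dim * 2], _in_proj_bias[-_dim:]
assert _query_w.shape == _key_w.shape == _value_w.shape == (_dim, _dim)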
| 350
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> str:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_multiple_size
_a = hidden_act
_a = hidden_dropout
_a = attention_dropout
_a = weight_tying
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a , _a , _a = self.prepare_config_and_inputs()
_a = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = GPTNeoXJapaneseModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_a = True
_a = GPTNeoXJapaneseModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = True
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
_a = output_from_no_past['''hidden_states'''][0]
_a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
A_ : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
A_ : List[str] = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Optional[Any] = False
A_ : Tuple = False
A_ : Optional[int] = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = GPTNeoXJapaneseModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
_a = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = '''abeja/gpt-neox-japanese-2.7b'''
_a = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_a = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_a = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
_a = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase )
_a = []
for prompt in prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=50 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
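
# Sketch (illustrative, hedged): the same generate/batch_decode pattern as the
# @slow test above, against a tiny stand-in checkpoint so it runs quickly.
# "sshleifer/tiny-gpt2" is an assumption -- any causal LM checkpoint works.
from transformers import AutoModelForCausalLM, AutoTokenizer


def _tiny_generate_demo() -> list:
    tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
    model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
    input_ids = tokenizer("hello", return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_length=20)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)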
| 153
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=[0, 1, 2, 3] , ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = 100
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Any = scope
SCREAMING_SNAKE_CASE : Any = out_indices
SCREAMING_SNAKE_CASE : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[Any] = num_patches + 1
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = BeitModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = BeitForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = BeitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = BeitForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = BeitForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = BeitModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->Any:
pass
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase ).loss
loss.backward()
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = _config_zero_init(_lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __lowerCAmelCase ( self ) ->int:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = BeitModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=_lowerCamelCase , bool_masked_pos=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowerCamelCase , atol=1e-2 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : int = torch.Size((1, 1000) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE : Optional[int] = 281
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE : Optional[int] = 2396
self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
SCREAMING_SNAKE_CASE : Any = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=_lowerCamelCase , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
SCREAMING_SNAKE_CASE : Dict = model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE : str = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE : Any = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
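
# Sketch (illustrative): the core of `post_process_semantic_segmentation` --
# upsample logits to the requested size, then take the per-pixel argmax.
# Shapes mirror the test above; the random logits are a stand-in.
import torch.nn.functional as F

_logits = torch.randn(1, 150, 160, 160)  # (batch, num_labels, height, width)
_resized = F.interpolate(_logits, size=(500, 300), mode="bilinear", align_corners=False)
_segmentation = _resized.argmax(dim=1)[0]  # (500, 300) tensor of class ids
assert _segmentation.shape == (500, 300)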
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
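
# Sketch (illustrative): the float-tensor -> PIL round trip that
# `get_dummy_inputs` relies on; values are assumed to lie in [0, 1] before
# scaling to uint8.
_image = torch.rand(1, 3, 64, 64)                     # NCHW float in [0, 1]
_array = _image.cpu().permute(0, 2, 3, 1).numpy()[0]  # HWC
_pil_image = Image.fromarray((_array * 255).astype(np.uint8)).convert("RGB").resize((256, 256))
assert _pil_image.size == (256, 256)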
| 313
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize raw input into a batch of videos: List[List[ImageInput]]."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


# Class name restored on the assumption that this is the ViViT video processor
# (the rescale-with-offset behavior below matches it).
class VivitImageProcessor(BaseImageProcessor):
    r"""Constructs a video image processor: resize, center crop, rescale (optionally with offset), normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
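
# --- Usage sketch (added for illustration; not part of the original module) ---
# Feeding the processor above a dummy 8-frame clip. The class name follows the
# restoration above; frame sizes and pixel values are made up.
#
# import numpy as np
# processor = VivitImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor(video, return_tensors="np")   # BaseImageProcessor makes instances callable
# print(batch["pixel_values"].shape)              # expected: (1, 8, 3, 224, 224)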
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
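
# --- Note (added): what the lazy init above buys ---
# Importing this package only builds `_import_structure`; the heavy submodules
# (e.g. `modeling_clap`, which pulls in torch) are loaded on first attribute
# access through `_LazyModule`. A sketch of the effect:
#
# from transformers.models.clap import ClapProcessor   # cheap, no torch import yet
# from transformers.models.clap import ClapModel       # now modeling_clap (and torch) load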
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1,
                 activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups,
            use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem): a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act,
            name="embedder")

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """RegNet shortcut: projects the residual to the right size and downsamples with `stride`."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation layer."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: a ResNet-style bottleneck block with grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int,
                 stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0"))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Name restored on the assumption that this is transformers' standard `input_signature`
        # property, since the obfuscated original returned a dict of tf.TensorSpec.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states,
            return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
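
# --- Usage sketch (added for illustration; not part of the original module) ---
# Driving the classification model above through the public transformers API.
# The checkpoint id matches _IMAGE_CLASS_CHECKPOINT; the random pixel values
# stand in for a properly preprocessed image.
#
# import tensorflow as tf
# from transformers import TFRegNetForImageClassification
#
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# pixel_values = tf.random.uniform((1, 3, 224, 224))
# logits = model(pixel_values).logits
# print(int(tf.argmax(logits, axis=-1)[0]))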
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding positions with -100 so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n\n\n"
            "CONSTANT = None\n\n\n"
            'def function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\n'
            'class FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n'
            '    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    r"""Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
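
# --- Usage sketch (added for illustration; not part of the original module) ---
# Typical round trip with the processor above: featurize audio for the model
# and tokenize targets via the `text` argument. The checkpoint id, the dummy
# waveform, and the printed keys are assumptions for demonstration.
#
# import numpy as np
# from transformers import MCTCTProcessor
#
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
# inputs = processor(audio=speech, sampling_rate=16000, text="hello", return_tensors="pt")
# print(inputs.keys())  # expected to include input_features and labels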
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-tokenization followed by greedy wordpiece matching."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the special space/line tokens to their literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative and special ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special space/line tokens that __init__ replaced with literal characters.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
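
# --- Usage sketch (added for illustration; not part of the original module) ---
# Encode/decode round trip with the tokenizer above; the checkpoint id is the
# one referenced in PRETRAINED_VOCAB_FILES_MAP. Requires `jieba` installed.
#
# from transformers import CpmAntTokenizer
#
# tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# ids = tokenizer.encode("今天天气真好!")
# print(tokenizer.decode(ids))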
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
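
# --- Example invocation (added for illustration; script filename and checkpoint
# path are placeholders, not from the original file) ---
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird-pegasus/model.ckpt \
#       --save_dir ./bigbird-pegasus-converted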
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the profile JSON embedded in an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Class to scrape public Instagram profile information for a given username."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the user info dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping where needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
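
# --- Usage sketch (added for illustration; not part of the original module) ---
# Shape of the conversion for a single PyTorch linear layer. `flax_model` is a
# hypothetical FlaxModelMixin subclass; the only contract the converter above
# relies on is `flax_model.init_weights(rng)` returning a params pytree.
#
# import torch
#
# pt_state_dict = {"dense.weight": torch.randn(4, 8), "dense.bias": torch.randn(4)}
# flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
# # "dense.weight" lands at params["dense"]["kernel"], transposed to shape (8, 4).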
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 71
|
from torch import nn
class __A ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
super().__init__()
__UpperCamelCase : Dict =class_size
__UpperCamelCase : Any =embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__UpperCamelCase : Any =nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.mlp(lowerCamelCase__ )
return logits
| 71
| 1
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger()
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : nn.Module
__UpperCAmelCase : List[nn.Module] = field(default_factory=snake_case_ )
__UpperCAmelCase : list = field(default_factory=snake_case_ )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : str = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__ , nn.Convad ) or isinstance(UpperCamelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return list(filter(lambda UpperCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : nn.Module
__UpperCAmelCase : nn.Module
__UpperCAmelCase : int = 1
__UpperCAmelCase : List = field(default_factory=snake_case_ )
__UpperCAmelCase : List = field(default_factory=snake_case_ )
__UpperCAmelCase : bool = True
def __call__( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = Tracker(self.dest )(UpperCamelCase__ ).parametrized
snake_case : Any = Tracker(self.src )(UpperCamelCase__ ).parametrized
snake_case : List[str] = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip , UpperCamelCase__ ) )
snake_case : int = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip , UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while'
F' destination module has {len(UpperCamelCase__ )}.' )
for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
class _lowerCAmelCase ( nn.Module ):
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'Unexpected layer name {k}'
snake_case : Tuple = len(UpperCamelCase__ ) + 1
feature_blocks.append((F'res{block_index}', v) )
snake_case : List[Any] = nn.ModuleDict(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return get_trunk_forward_outputs(
UpperCamelCase__ , out_feat_keys=UpperCamelCase__ , feature_blocks=self._feature_blocks , )
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
snake_case : Any = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , UpperCamelCase__ ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
snake_case : List[str] = self.convert_name_to_timm(UpperCamelCase__ )
snake_case : List[Any] = partial(lambda: (timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval(), None) )
else:
snake_case : Union[str, Any] = super().__getitem__(UpperCamelCase__ )
return val
class _lowerCAmelCase ( snake_case_ ):
def __getitem__( self , UpperCamelCase__ ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
snake_case : Dict = RegNetModel
else:
snake_case : Dict = RegNetForImageClassification
return val
def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Tuple[str, str]] ) -> List[Any]:
"""simple docstring"""
for from_key, to_key in keys:
snake_case : str = from_state_dict[from_key].clone()
print(F'Copied key={from_key} to={to_key}' )
return to_state_dict
def __lowerCAmelCase ( lowercase : str , lowercase : Callable[[], nn.Module] , lowercase : Callable[[], nn.Module] , lowercase : RegNetConfig , lowercase : Path , lowercase : bool = True , ) -> Any:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
snake_case ,snake_case : Optional[int] = from_model_func()
snake_case : Tuple = our_model_func(lowercase ).eval()
snake_case : Dict = ModuleTransfer(src=lowercase , dest=lowercase , raise_if_mismatch=lowercase )
snake_case : List[str] = torch.randn((1, 3, 224, 224) )
module_transfer(lowercase )
if from_state_dict is not None:
snake_case : Optional[int] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
snake_case : Optional[Any] = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
snake_case : List[Any] = manually_copy_vissl_head(lowercase , our_model.state_dict() , lowercase )
our_model.load_state_dict(lowercase )
snake_case : List[Any] = our_model(lowercase , output_hidden_states=lowercase )
snake_case : Any = (
our_outputs.logits if isinstance(lowercase , lowercase ) else our_outputs.last_hidden_state
)
snake_case : int = from_model(lowercase )
snake_case : Union[str, Any] = from_output[-1] if type(lowercase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
snake_case : Optional[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase , lowercase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=lowercase , )
snake_case : List[str] = 224 if "seer" not in name else 384
# we can use the convnext one
snake_case : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=lowercase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=lowercase , )
print(F'Pushed {name}' )
def __lowerCAmelCase ( lowercase : Path , lowercase : str = None , lowercase : bool = True ) -> str:
"""simple docstring"""
snake_case : Optional[int] = "imagenet-1k-id2label.json"
snake_case : Optional[Any] = 1000
snake_case : str = (1, num_labels)
snake_case : Any = "huggingface/label-files"
snake_case : Union[str, Any] = num_labels
snake_case : int = json.load(open(cached_download(hf_hub_url(lowercase , lowercase , repo_type="dataset" ) ) , "r" ) )
snake_case : Any = {int(lowercase ): v for k, v in idalabel.items()}
snake_case : str = idalabel
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : int = partial(lowercase , num_labels=lowercase , idalabel=lowercase , labelaid=lowercase )
snake_case : Any = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
snake_case : Optional[int] = NameToOurModelFuncMap()
snake_case : Tuple = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase : str , lowercase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
snake_case : str = torch.hub.load_state_dict_from_url(lowercase , model_dir=str(lowercase ) , map_location="cpu" )
snake_case : List[str] = model_func()
# check if we have a head, if yes add it
snake_case : Optional[Any] = files["classy_state_dict"]["base_model"]["model"]
snake_case : Dict = model_state_dict["trunk"]
model.load_state_dict(lowercase )
return model.eval(), model_state_dict["heads"]
# pretrained
snake_case : Optional[Any] = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Tuple = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Optional[Any] = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case : str = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
snake_case : Optional[int] = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Tuple = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Optional[Any] = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case : Union[str, Any] = partial(
lowercase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase , lowercase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase , lowercase , lowercase , )
return config, expected_shape
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
__snake_case = parser.parse_args()
__snake_case = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 112
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCAmelCase ( lowercase : List[str] , lowercase : int , lowercase : Dict , lowercase : Dict , lowercase : int ) -> int:
"""simple docstring"""
with open(lowercase ) as metadata_file:
snake_case : str = json.load(lowercase )
snake_case : Optional[Any] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
snake_case : Tuple = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
snake_case : Optional[Any] = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
snake_case : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case : Tuple = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
snake_case : str = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
snake_case : str = json.load(lowercase )
snake_case : List[str] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
snake_case : Dict = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
snake_case : Tuple = tokenizer.convert_tokens_to_ids(["@"] )[0]
snake_case : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
snake_case : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case : Tuple = state_dict[bias_name]
snake_case : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
snake_case : str = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case : Optional[int] = state_dict["entity_predictions.bias"]
snake_case : Optional[int] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case : Union[str, Any] = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
snake_case : Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
snake_case : Any = state_dict[key]
else:
snake_case : Tuple = state_dict[key]
snake_case ,snake_case : Optional[Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case : Optional[Any] = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
snake_case : List[str] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
snake_case : str = (0, 9)
snake_case : Union[str, Any] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : int = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : int = torch.Size((1, 33, 768) )
snake_case : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : Any = torch.Size((1, 1, 768) )
snake_case : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case : List[str] = MLukeTokenizer.from_pretrained(lowercase )
snake_case : List[Any] = "Tokyo is the capital of <mask>."
snake_case : Optional[Any] = (24, 30)
snake_case : List[str] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : Any = model(**lowercase )
snake_case : int = encoding["input_ids"][0].tolist()
snake_case : str = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
snake_case : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
snake_case : Tuple = outputs.entity_logits[0][0].argmax().item()
snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def __lowerCAmelCase ( lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case : Tuple = ["[MASK]", "[PAD]", "[UNK]"]
snake_case : Optional[Any] = [json.loads(lowercase ) for line in open(lowercase )]
snake_case : Any = {}
for entry in data:
snake_case : Union[str, Any] = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
snake_case : Union[str, Any] = entity_id
break
snake_case : Dict = F'{language}:{entity_name}'
snake_case : str = entity_id
return new_mapping
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 112
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 0
|
"""simple docstring"""
import re
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
if match := re.search(UpperCamelCase__ , UpperCamelCase__ ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 154
|
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ = 100 ):
"""simple docstring"""
A__ = (n * (n + 1) // 2) ** 2
A__ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 154
| 1
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
lowercase__ = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self: int, a_: str, a_: Tuple, a_: Dict = None, a_: str = 50_257, a_: Tuple = 1_024, a_: int = 768, a_: Dict = 12, a_: List[Any] = 12, a_: Tuple = None, a_: int = "gelu_new", a_: Union[str, Any] = 0.1, a_: int = 0.1, a_: Optional[int] = 0.1, a_: int = 1E-5, a_: Optional[Any] = 0.02, a_: List[Any] = True, a_: str = True, a_: Optional[int] = False, a_: Any = False, ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_snake_case : int = prefix_inner_dim
_snake_case : List[Any] = prefix_hidden_dim
_snake_case : int = (
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_snake_case : Tuple = (
nn.Linear(self.prefix_hidden_dim, __lowercase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_snake_case : Union[str, Any] = GPTaConfig(
vocab_size=__lowercase, n_positions=__lowercase, n_embd=__lowercase, n_layer=__lowercase, n_head=__lowercase, n_inner=__lowercase, activation_function=__lowercase, resid_pdrop=__lowercase, embd_pdrop=__lowercase, attn_pdrop=__lowercase, layer_norm_epsilon=__lowercase, initializer_range=__lowercase, scale_attn_weights=__lowercase, use_cache=__lowercase, scale_attn_by_inverse_layer_idx=__lowercase, reorder_and_upcast_attn=__lowercase, )
_snake_case : List[str] = GPTaLMHeadModel(__lowercase )
def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str = None, a_: Any = None, ):
'''simple docstring'''
_snake_case : Any = self.transformer.transformer.wte(__lowercase )
_snake_case : Union[str, Any] = self.encode_prefix(__lowercase )
_snake_case : List[str] = self.decode_prefix(__lowercase )
_snake_case : str = torch.cat((prefix_embeds, embedding_text), dim=1 )
if labels is not None:
_snake_case : List[Any] = self.get_dummy_token(input_ids.shape[0], input_ids.device )
_snake_case : int = torch.cat((dummy_token, input_ids), dim=1 )
_snake_case : int = self.transformer(inputs_embeds=__lowercase, labels=__lowercase, attention_mask=__lowercase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict ):
'''simple docstring'''
return torch.zeros(__lowercase, self.prefix_length, dtype=torch.intaa, device=__lowercase )
def UpperCamelCase_ ( self: Dict, a_: str ):
'''simple docstring'''
return self.encode_prefix(__lowercase )
@torch.no_grad()
def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.split(__lowercase, 1, dim=0 )
_snake_case : Tuple = []
_snake_case : Union[str, Any] = []
for feature in features:
_snake_case : Optional[Any] = self.decode_prefix(feature.to(__lowercase ) ) # back to the clip feature
# Only support beam search for now
_snake_case : List[str] = self.generate_beam(
input_embeds=__lowercase, device=__lowercase, eos_token_id=__lowercase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_snake_case : Optional[Any] = torch.stack(__lowercase )
_snake_case : str = torch.stack(__lowercase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCamelCase_ ( self: Dict, a_: List[str]=None, a_: List[str]=None, a_: Any=None, a_: Optional[Any] = 5, a_: Union[str, Any] = 67, a_: str = 1.0, a_: Union[str, Any] = None, ):
'''simple docstring'''
_snake_case : Optional[Any] = eos_token_id
_snake_case : str = None
_snake_case : Optional[int] = None
_snake_case : List[Any] = torch.ones(__lowercase, device=__lowercase, dtype=torch.int )
_snake_case : List[str] = torch.zeros(__lowercase, device=__lowercase, dtype=torch.bool )
if input_embeds is not None:
_snake_case : Dict = input_embeds
else:
_snake_case : int = self.transformer.transformer.wte(__lowercase )
for i in range(__lowercase ):
_snake_case : List[str] = self.transformer(inputs_embeds=__lowercase )
_snake_case : List[str] = outputs.logits
_snake_case : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_snake_case : List[str] = logits.softmax(-1 ).log()
if scores is None:
_snake_case : Optional[Any] = logits.topk(__lowercase, -1 )
_snake_case : List[str] = generated.expand(__lowercase, *generated.shape[1:] )
_snake_case : Optional[int] = next_tokens.permute(1, 0 ), scores.squeeze(0 )
if tokens is None:
_snake_case : str = next_tokens
else:
_snake_case : List[Any] = tokens.expand(__lowercase, *tokens.shape[1:] )
_snake_case : List[str] = torch.cat((tokens, next_tokens), dim=1 )
else:
_snake_case : Dict = -float(np.inf )
_snake_case : Tuple = 0
_snake_case : List[str] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_snake_case : Optional[Any] = scores_sum / seq_lengths[:, None]
_snake_case : Optional[Any] = scores_sum_average.view(-1 ).topk(__lowercase, -1 )
_snake_case : str = next_tokens // scores_sum.shape[1]
_snake_case : Tuple = seq_lengths[next_tokens_source]
_snake_case : str = next_tokens % scores_sum.shape[1]
_snake_case : int = next_tokens.unsqueeze(1 )
_snake_case : Any = tokens[next_tokens_source]
_snake_case : Optional[int] = torch.cat((tokens, next_tokens), dim=1 )
_snake_case : Optional[Any] = generated[next_tokens_source]
_snake_case : str = scores_sum_average * seq_lengths
_snake_case : Union[str, Any] = is_stopped[next_tokens_source]
_snake_case : Optional[int] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0], 1, -1 )
_snake_case : int = torch.cat((generated, next_token_embed), dim=1 )
_snake_case : List[str] = is_stopped + next_tokens.eq(__lowercase ).squeeze()
if is_stopped.all():
break
_snake_case : Any = scores / seq_lengths
_snake_case : Optional[int] = scores.argsort(descending=__lowercase )
# tokens tensors are already padded to max_seq_length
_snake_case : int = [tokens[i] for i in order]
_snake_case : str = torch.stack(__lowercase, dim=0 )
_snake_case : List[str] = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 64
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__lowercase = (720, 1280) # Height, Width
__lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it.
__lowercase = 1 / 100
__lowercase = ''''''
__lowercase = ''''''
__lowercase = ''''''
__lowercase = 250
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCamelCase :List[Any] = random_chars(32 )
__UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__UpperCamelCase :Optional[Any] = []
for anno in new_annos:
__UpperCamelCase :int = anno[3] - anno[1]
__UpperCamelCase :Optional[int] = anno[4] - anno[2]
__UpperCamelCase :int = anno[1] + width / 2
__UpperCamelCase :List[str] = anno[2] + height / 2
__UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :str = []
__UpperCamelCase :str = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
__UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
__UpperCamelCase :str = in_file.readlines()
__UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
__UpperCamelCase :int = []
for obj_list in obj_lists:
__UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
__UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , ):
'''simple docstring'''
__UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
__UpperCamelCase :Any = int(scale_y * output_size[0] )
__UpperCamelCase :List[str] = []
__UpperCamelCase :Dict = []
for i, index in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = all_annos[index]
__UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
if i == 0: # top-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
__UpperCamelCase :Union[str, Any] = img
for bbox in img_annos:
__UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = bbox[2] * scale_y
__UpperCamelCase :int = bbox[3] * scale_x
__UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCamelCase :List[str] = img
for bbox in img_annos:
__UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Dict = bbox[2] * scale_y
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Tuple = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Tuple = bbox[3] * scale_x
__UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCamelCase :Optional[int] = cva.resize(
SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__UpperCamelCase :List[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__UpperCamelCase :Optional[Any] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 43
| 0
|
"""simple docstring"""
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = True
lowerCAmelCase__ :Tuple = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
order.append(_SCREAMING_SNAKE_CASE )
return order
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = True
lowerCAmelCase__ :Union[str, Any] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return component
def __A (_SCREAMING_SNAKE_CASE ) ->list[list[int]]:
"""simple docstring"""
lowerCAmelCase__ :Any = len(_SCREAMING_SNAKE_CASE ) * [False]
lowerCAmelCase__ :dict[int, list[int]] = {vert: [] for vert in range(len(_SCREAMING_SNAKE_CASE ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = []
for i, was_visited in enumerate(_SCREAMING_SNAKE_CASE ):
if not was_visited:
order += topology_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = []
lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) * [False]
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :Dict = order[len(_SCREAMING_SNAKE_CASE ) - i - 1]
if not visited[vert]:
lowerCAmelCase__ :Union[str, Any] = find_components(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
components_list.append(_SCREAMING_SNAKE_CASE )
return components_list
| 254
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = parent
lowerCAmelCase__ :int = batch_size
lowerCAmelCase__ :List[str] = seq_length
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Tuple = use_input_mask
lowerCAmelCase__ :Dict = use_token_type_ids
lowerCAmelCase__ :Union[str, Any] = use_labels
lowerCAmelCase__ :Tuple = vocab_size
lowerCAmelCase__ :List[Any] = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :List[str] = intermediate_size
lowerCAmelCase__ :Optional[Any] = hidden_act
lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ :Any = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = max_position_embeddings
lowerCAmelCase__ :Tuple = type_vocab_size
lowerCAmelCase__ :List[str] = type_sequence_label_size
lowerCAmelCase__ :Tuple = initializer_range
lowerCAmelCase__ :Optional[Any] = num_labels
lowerCAmelCase__ :int = num_choices
lowerCAmelCase__ :Union[str, Any] = relative_attention
lowerCAmelCase__ :int = position_biased_input
lowerCAmelCase__ :Optional[int] = pos_att_type
lowerCAmelCase__ :Dict = scope
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :int = None
if self.use_input_mask:
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase__ :Optional[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Dict = None
if self.use_labels:
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ :Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.get_config()
lowerCAmelCase__ :Optional[Any] = 3_0_0
return config
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = DebertaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0]
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = DebertaForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.num_labels
lowerCAmelCase__ :int = DebertaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.num_labels
lowerCAmelCase__ :Any = DebertaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = DebertaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) :Tuple = config_and_inputs
lowerCAmelCase__ :int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ :Optional[Any] = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ :Tuple = True
__magic_name__ :List[Any] = False
__magic_name__ :Optional[Any] = False
__magic_name__ :str = False
__magic_name__ :int = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = DebertaModelTester(self )
lowerCAmelCase__ :List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :int = DebertaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def snake_case ( self ):
'''simple docstring'''
pass
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = DebertaModel.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase__ :str = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase__ :Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ :int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ :str = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 254
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = "arrow" , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : int =load_from_cache_file
a__ : Tuple =file_format
a__ : List[Any] =Spark(
df=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , working_dir=lowerCAmelCase__ , **lowerCAmelCase__ , )
def _lowercase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
a__ : str =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 95
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __a(SCREAMING_SNAKE_CASE_ : float ):
'''simple docstring'''
if num <= 0:
raise ValueError("math domain error" )
return quad(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , args=(SCREAMING_SNAKE_CASE_) )[0]
def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
'''simple docstring'''
return math.pow(SCREAMING_SNAKE_CASE_ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
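
# Illustrative check (a sketch): with the defaults above, the derived channel
# dimension after the last stage is int(96 * 2 ** (4 - 1)) == 768.
#
#   config = DonutSwinConfig()
#   assert config.hidden_size == 768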
"""simple docstring"""
def snake_case_ ( A_ : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(A_, A_ ):
raise ValueError('''Length must be a positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def snake_case_ ( A_ : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(A_, A_ ):
raise ValueError('''Length must be a positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
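
# Worked example (illustrative): for edge = 1, the surface area is
# 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6458 and the volume is (15 + 7 * sqrt(5)) / 4 ≈ 7.6631.
if __name__ == "__main__":
    print(f"{dodecahedron_surface_area(1):.4f}")  # 20.6458
    print(f"{dodecahedron_volume(1):.4f}")  # 7.6631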
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build an alpha mask that linearly ramps to zero over the tile overlap region."""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50,
                 negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                 callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
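
# Tiling arithmetic (illustrative): for a 512x512 input with the default
# tile_size=128, tcx = tcy = ceil(512 / 128) = 4, so 16 tiles are upscaled
# independently and blended with linear-ramp alpha masks of width
# tile_border * 4 along interior edges.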
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # pairwise squared distances between rows of `a` and rows of `b`
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # map each RGB pixel to the index of its nearest cluster
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
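
# Quick illustration (a sketch): quantize two RGB pixels against a two-color
# palette; each pixel maps to the index of its nearest cluster.
#
#   palette = np.array([[0, 0, 0], [255, 255, 255]])
#   pixels = np.array([[10, 10, 10], [250, 240, 245]])
#   color_quantize(pixels, palette)  # -> array([0, 1])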
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # rescale pixel values to the range [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
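
# Illustrative usage (a sketch; real `clusters` come from an ImageGPT checkpoint,
# random values stand in here):
#
#   clusters = np.random.randint(0, 256, (512, 3))
#   processor = ImageGPTImageProcessor(clusters=clusters)
#   encoding = processor(images=PIL.Image.new("RGB", (300, 300)), return_tensors="np")
#   encoding["input_ids"].shape  # (1, 256 * 256) cluster indices per pixel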
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
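
# Illustrative usage (a sketch; the names restored above follow the standard
# Transformer-XL implementation of this adaptive softmax):
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512, cutoffs=[2000, 6000], div_val=4)
#   hidden = torch.randn(8, 512)
#   log_probs = crit.log_prob(hidden)  # shape (8, 10000)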
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser value-function checkpoint to the diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
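
# Illustrative follow-up (assumption: the script above has been run and the output
# directories exist locally); the saved weights and config can then be reloaded
# through the standard diffusers API:
#
#   reloaded = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")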
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 8 -> '0b1000'."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
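
# Example outputs for the converter above (illustrative):
#   decimal_to_binary(8)   -> '0b1000'
#   decimal_to_binary(-5)  -> '-0b101'
#   decimal_to_binary(0)   -> '0b0'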
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trips are exercised in check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that switching between equivalent scheduler configs gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
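
# Minimal standalone sketch of the sampling loop the tests above exercise
# (illustrative; a dummy residual stands in for a real denoising model):
#
#   scheduler = DPMSolverSinglestepScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = 0.1 * sample  # placeholder for model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample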
""" PyTorch MobileNetV1 model."""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TensorFlow variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[Union[bool, str]] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
            padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3,
                    stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
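
# Minimal inference sketch (illustrative; assumes network access to download the
# checkpoint named in the docstrings above, and a PIL image `image`):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])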
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple):
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
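
# Example behaviour (illustrative): for typical English text the most frequent letters
# track ETAOIN, so the match score is high; for uniform noise it stays low.
#
#   get_frequency_order("Hello World")       # all 26 letters, ordered by frequency
#   english_freq_match_score("Hello World")  # an integer in [0, 12]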
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the sample tree: 1 with children 2 and 3; 2 with children 4 and 5."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the binary tree (number of levels)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Visit nodes of the tree level by level (breadth-first)."""
    output: list[Any] = []

    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)

    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect node values of one level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect node values of one level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction level by level."""
    if root is None:
        return []

    output: list[Any] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()

    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
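
# For the sample tree built by make_tree() (1 with children 2 and 3; 2 with children
# 4 and 5), the zigzag traversal above yields [[1], [3, 2], [4, 5]].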
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
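
# Sketch of producing a canny control image like the fixture above (illustrative;
# assumes OpenCV is installed and a PIL image `init_image` is available):
#
#   import cv2
#   import numpy as np
#   from PIL import Image
#   edges = cv2.Canny(np.array(init_image), 100, 200)         # uint8 edge map
#   canny_image = Image.fromarray(np.stack([edges] * 3, -1))  # 3-channel control image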
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """
    Construct an MBART tokenizer based on SentencePiece. A language-code suffix
    `[eos, lang_code]` is appended to both source and target sequences (see
    `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix; suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
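
# Typical translation usage (illustrative; mirrors the defaults above):
#
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
#   # input_ids end with [eos_token_id, lang_code], per set_src_lang_special_tokens()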
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
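
# Usage sketch (illustrative): the ONNX config wraps a model config and reports the
# dynamic axes used at export time.
#
#   config = CodeGenConfig()
#   onnx_config = CodeGenOnnxConfig(config)
#   print(onnx_config.inputs)              # OrderedDict with batch/sequence axes
#   print(onnx_config.default_onnx_opset)  # 13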
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence, e.g. 'hello world' -> 'Hello world'."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
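
# Example outputs (illustrative):
#   capitalize("hello world")  -> 'Hello world'
#   capitalize("123 hello")    -> '123 hello'
#   capitalize("")             -> ''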
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
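# Sketch (not part of the tests above): the usual consumer-side step after
# DPTForDepthEstimation is resizing the predicted depth map back to the input
# resolution; the target size below is an illustrative assumption.
import torch

depth = torch.rand(1, 384, 384)  # (batch, height, width), as asserted above
resized = torch.nn.functional.interpolate(
    depth.unsqueeze(1), size=(480, 640), mode="bicubic", align_corners=False
)
print(resized.shape)  # torch.Size([1, 1, 480, 640])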
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
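# Minimal, self-contained sketch of the pyarrow pattern used by
# _generate_tables above (the file name is an assumption for illustration):
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
pq.write_table(table, "example.parquet")
parquet_file = pq.ParquetFile("example.parquet")
for record_batch in parquet_file.iter_batches(batch_size=2, columns=["a"]):
    print(pa.Table.from_batches([record_batch]).to_pydict())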
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Return the number of perimeters up to `limit` that belong to exactly one
    integer-sided right triangle, generating the primitive triples with
    Euclid's formula and counting every multiple of their perimeters.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
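# Worked example (illustrative) of the Euclid parametrisation used above:
# coprime m > n of opposite parity give a primitive triple
# (m*m - n*n, 2*m*n, m*m + n*n) with perimeter 2*m*(m + n).
from math import gcd

m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert gcd(m, n) == 1 and (a, b, c) == (3, 4, 5)
assert a * a + b * b == c * c and a + b + c == 2 * m * (m + n)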
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000)
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000)
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(input_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
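# Standalone sketch of the tensor -> PIL conversion performed in
# get_dummy_inputs above (sizes are illustrative):
import numpy as np
import torch
from PIL import Image

image = torch.rand(1, 3, 32, 32) * 2 - 1  # stand-in for data in [-1, 1]
image = image * 0.5 + 0.5                 # rescale to [0, 1], as in get_dummy_inputs
array = (image.clamp(0, 1).permute(0, 2, 3, 1).numpy()[0] * 255).astype(np.uint8)
pil_image = Image.fromarray(array)
print(pil_image.size)  # (32, 32)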
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training and
    # is dropped for downstream use
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
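# Standalone sketch of the fused-qkv split performed by read_in_q_k_v above:
# timm stores query/key/value as one (3 * hidden, hidden) matrix, which is
# sliced into three (hidden, hidden) blocks (the size below is illustrative).
import torch

hidden = 8
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
query = qkv_weight[:hidden, :]
key = qkv_weight[hidden : hidden * 2, :]
value = qkv_weight[-hidden:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)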
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        ))
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
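# Standalone sketch of the hidden-state layout checked above: Swin hidden
# states come as (batch_size, height * width, n_channels) and can be folded
# back into a spatial map (sizes are illustrative):
import torch

batch_size, height, width, n_channels = 2, 8, 8, 16
hidden_state = torch.rand(batch_size, height * width, n_channels)
spatial = hidden_state.transpose(1, 2).reshape(batch_size, n_channels, height, width)
print(spatial.shape)  # torch.Size([2, 16, 8, 8])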
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
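# Example of the remapping this guard reports (illustrative): invoking the
# script with the removed flag "--no_cuda" produces
#     "Arg --no_cuda is no longer used, please use --no-cuda instead."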
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
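# Minimal usage sketch of the HashMap above:
if __name__ == "__main__":
    hash_map = HashMap()
    hash_map["apple"] = 1
    hash_map["banana"] = 2
    del hash_map["apple"]
    print(len(hash_map), hash_map["banana"], hash_map)  # 1 2 HashMap(banana: 2)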
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : int = 14 )-> None:
"""simple docstring"""
if group not in primes:
raise ValueError('Unsupported Group' )
lowercase__ = primes[group]['prime']
lowercase__ = primes[group]['generator']
lowercase__ = int(hexlify(urandom(32 ) ) , base=16 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ = pow(self.generator , self.__private_key , self.prime )
return hex(a )[2:]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : int )-> bool:
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(a , (self.prime - 1) // 2 , self.prime ) == 1
)
def SCREAMING_SNAKE_CASE_ ( self : str , a : str )-> str:
"""simple docstring"""
lowercase__ = int(a , base=16 )
if not self.is_valid_public_key(a ):
raise ValueError('Invalid public key' )
lowercase__ = pow(a , self.__private_key , self.prime )
return shaaaa(str(a ).encode() ).hexdigest()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : int , a : int )-> bool:
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(a , (prime - 1) // 2 , a ) == 1
)
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : str , a : str , a : int = 14 )-> str:
"""simple docstring"""
lowercase__ = int(a , base=16 )
lowercase__ = int(a , base=16 )
lowercase__ = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(a , a ):
raise ValueError('Invalid public key' )
lowercase__ = pow(a , a , a )
return shaaaa(str(a ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
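# Minimal sketch of the exchange the class above implements: both parties
# derive the same shared secret from each other's public values. Uses only
# the standard library; dh_demo is an illustrative name, not from this file.
from secrets import randbelow

def dh_demo(prime: int, generator: int = 2) -> bool:
    a = randbelow(prime - 3) + 2            # Alice's private exponent
    b = randbelow(prime - 3) + 2            # Bob's private exponent
    alice_pub = pow(generator, a, prime)
    bob_pub = pow(generator, b, prime)
    return pow(bob_pub, a, prime) == pow(alice_pub, b, prime)

assert dh_demo(23)  # tiny toy prime; the MODP group primes above work the same way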
| 269
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=1_3 , lowerCAmelCase_ : Dict=3_0 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=3_2 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : int=3_7 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : int=1_0 , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[Any]=2 , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 1
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = ViTModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = ViTForMaskedImageModeling(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
lowercase_ = 1
lowercase_ = ViTForMaskedImageModeling(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = self.type_sequence_label_size
lowercase_ = ViTForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowercase_ = 1
lowercase_ = ViTForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = ViTModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""")
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear))
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@slow
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = ViTModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""") if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""").to(lowerCAmelCase_)
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""").to(lowerCAmelCase_)
# forward pass
with torch.no_grad():
lowercase_ = model(**lowerCAmelCase_)
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
lowercase_ = torch.tensor([-0.2_744, 0.8_215, -0.0_836]).to(lowerCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4))
@slow
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ViTModel.from_pretrained("""facebook/dino-vits8""").to(lowerCAmelCase_)
lowercase_ = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_8_0)
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""")
lowercase_ = inputs.pixel_values.to(lowerCAmelCase_)
# forward pass
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_ , interpolate_pos_encoding=lowerCAmelCase_)
# verify the logits
lowercase_ = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_)
lowercase_ = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]]).to(lowerCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""")
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""")
lowercase_ = inputs.pixel_values.to(lowerCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_)
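# The patch/sequence-length arithmetic the tester above relies on, stated
# standalone: with the default 30x30 images and 2x2 patches there are
# (30 // 2) ** 2 = 225 patches, plus one [CLS] token.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches + 1 == 226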
| 136
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list:
'''simple docstring'''
lowercase_ = len(__lowerCAmelCase )
lowercase_ = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
lowercase_ = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
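# Cross-check for the Neville interpolation routine above: the sample points
# below lie on y = x + 5, so any exact polynomial interpolation must give 10
# at x = 5. lagrange_eval is an illustrative reference implementation, not
# from this file.
def lagrange_eval(xs, ys, x):
    total = 0.0
    for i, (xi, yi) in enumerate(zip(xs, ys)):
        term = yi
        for j, xj in enumerate(xs):
            if j != i:
                term *= (x - xj) / (xi - xj)
        total += term
    return total

assert abs(lagrange_eval([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5) - 10.0) < 1e-9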
| 136
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCamelCase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
__a: List[Any] = 1_0_0_0_0
__a: Optional[Any] = None
__a: Union[str, Any] = None
class a_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__a: Dict = ParquetConfig
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self , lowercase_ ) -> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
lowerCAmelCase_ = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
lowerCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCAmelCase_ = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowerCamelCase ):
with open(_lowerCamelCase , 'rb' ) as f:
lowerCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCamelCase ) )
break
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'files': files} ) )
return splits
def _lowercase ( self , lowercase_ ) -> str:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase_ = table_cast(_lowerCamelCase , self.info.features.arrow_schema )
return pa_table
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
with open(_lowerCamelCase , 'rb' ) as f:
lowerCAmelCase_ = pq.ParquetFile(_lowerCamelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(_lowerCamelCase )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' )
raise
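# Standalone sketch of the batched read pattern in the table generator above,
# using only pyarrow (the file path and batch size below are made up for
# illustration).
import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path: str, batch_size: int = 10_000):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size):
        yield pa.Table.from_batches([record_batch])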
| 354
|
import baseaa
def lowerCamelCase ( a_ ) -> bytes:
return baseaa.baaencode(string.encode('utf-8' ) )
def lowerCamelCase ( a_ ) -> str:
return baseaa.baadecode(a_ ).decode('utf-8' )
if __name__ == "__main__":
lowerCamelCase_ = """Hello World!"""
lowerCamelCase_ = baseaa_encode(test)
print(encoded)
lowerCamelCase_ = baseaa_decode(encoded)
print(decoded)
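# For reference, the unobfuscated standard-library calls behind the helpers
# above are base64.b85encode / b85decode ('baseaa' and 'baaencode' are the
# dataset's obfuscated spellings of 'base64' and 'b85encode'):
import base64

encoded = base64.b85encode("Hello World!".encode("utf-8"))
assert base64.b85decode(encoded).decode("utf-8") == "Hello World!"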
| 14
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
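# A minimal sketch of the same deferred-import idea using PEP 562's
# module-level __getattr__ (the attribute map below is hypothetical and far
# smaller than the real _import_structure above).
import importlib

_LAZY_ATTRS = {"XCLIPProcessor": ".processing_x_clip"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")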
| 171
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Optional[int] = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : int = use_attention_mask
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = num_choices
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : int = None
if self.use_attention_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = config_and_inputs
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = FlaxRobertaModelTester(self )
@slow
def _a (self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class_name.from_pretrained("""roberta-base""" , from_pt=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
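# Standalone sketch of the random test inputs built by the tester above
# (ids_tensor / random_attention_mask), here with plain numpy and the
# tester's defaults: batch_size=13, seq_length=7, vocab_size=99.
import numpy as np

rng = np.random.default_rng(0)
input_ids = rng.integers(0, 99, size=(13, 7))
attention_mask = rng.integers(0, 2, size=(13, 7))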
| 171
| 1
|
import comet # From: unbabel-comet
import torch
import datasets
a_ : Union[str, Any] = datasets.logging.get_logger(__name__)
a_ : Optional[int] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
a_ : List[str] = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
a_ : str = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence'),
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] , )
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
if self.config_name == "default":
SCREAMING_SNAKE_CASE = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
else:
SCREAMING_SNAKE_CASE = comet.load_from_checkpoint(comet.download_model(self.config_name))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None , a=False) -> Union[str, Any]:
if gpus is None:
SCREAMING_SNAKE_CASE = 1 if torch.cuda.is_available() else 0
SCREAMING_SNAKE_CASE = {'src': sources, 'mt': predictions, 'ref': references}
SCREAMING_SNAKE_CASE = [dict(zip(a , a)) for t in zip(*data.values())]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.scorer.predict(a , gpus=a , progress_bar=a)
return {"mean_score": mean_score, "scores": scores}
| 327
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
SCREAMING_SNAKE_CASE = model(a)['last_hidden_state']
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768))
self.assertEqual(output.shape , a)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
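# The slice comparison above, standalone: np.allclose with an absolute
# tolerance treats values that differ by less than atol as equal.
import numpy as np

assert np.allclose([0.02540004], [0.0254], atol=1e-4)
assert not np.allclose([0.0256], [0.0254], atol=1e-4)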
| 327
| 1
|
def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> str:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , float ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(SCREAMING_SNAKE_CASE__ , str ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
A__ = False
if num < 0:
A__ = True
A__ = -num
A__ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )
return "0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
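# Sanity check: the manual conversion above should agree with Python's
# built-in bin() for positive and negative inputs.
assert bin(37) == "0b100101"
assert bin(-37) == "-0b100101"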
| 7
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class A :
"""simple docstring"""
def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,)
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any:
'''simple docstring'''
A__ = GPTNeoXModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = True
A__ = GPTNeoXModel(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]:
'''simple docstring'''
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
A__ = True
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3),config.vocab_size )
A__ = ids_tensor((self.batch_size, 3),vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens],dim=-1 )
A__ = torch.cat([input_mask, next_mask],dim=-1 )
A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,),output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) )
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 1_0],config.vocab_size )
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A__ = GPTNeoXModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
A__ = original_model(lowercase_ ).last_hidden_state
A__ = original_model(lowercase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A__ = {'type': scaling_type, 'factor': 10.0}
A__ = GPTNeoXModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
A__ = scaled_model(lowercase_ ).last_hidden_state
A__ = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : Tuple )-> Union[str, Any]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowercase_ )
A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 )
A__ = tokenizer.batch_decode(lowercase_ )[0]
self.assertEqual(lowercase_,lowercase_ )
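# The incremental-decoding bookkeeping exercised by the past-key-values test
# above, in miniature: the running ids and mask are extended with the newly
# drawn tokens before the second forward pass.
import numpy as np

input_ids = np.array([[5, 7, 9]])
next_tokens = np.array([[2]])
input_ids = np.concatenate([input_ids, next_tokens], axis=-1)
assert input_ids.tolist() == [[5, 7, 9, 2]]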
| 7
| 1
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
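# Worked example of the 0-1000 box normalisation above (the def name is
# obfuscated; normalize_box_demo restates its arithmetic standalone): a
# (left, top, right, bottom) box of (50, 100, 150, 200) on a 500x400 image
# maps to (100, 250, 300, 500), i.e. per-mille of each image dimension.
def normalize_box_demo(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

assert normalize_box_demo([50, 100, 150, 200], 500, 400) == [100, 250, 300, 500]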
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 366
|
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
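# The same average via the standard library, as a quick cross-check:
from statistics import fmean

assert fmean([3.0, 6.0, 9.0]) == 6.0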
| 15
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
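# Typical invocation of the conversion script above, using the flags it
# defines (the script file name and paths below are placeholders, not from
# this file):
#   python convert_lxmert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin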
| 72
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
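# Worked example: malus_law(100.0, 60.0) == 100.0 * cos(60 deg)**2 == 25.0,
# i.e. a polarizer at 60 degrees passes a quarter of the incident intensity.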
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 321
| 0
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent=None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
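# Illustrative output (version numbers are hypothetical):
# http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
# -> "diffusers/0.18.0; python/3.10.4; session_id/...; torch/2.0.1; pipeline_class/StableDiffusionPipeline"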
def get_full_repo_name(model_id: str, organization: str = None, token: str = None):
    """Return `namespace/model_id`, resolving the namespace from the token when needed."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to a YAML block
        card_data=ModelCardData(language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None) -> str:
    """Extract the commit hash from a resolved filename pointing into a model cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
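# Illustrative example (hypothetical cache path): for
# ".../snapshots/<40-char-hex>/unet/diffusion_pytorch_model.bin" this returns the
# <40-char-hex> part when it matches REGEX_COMMIT_HASH, otherwise None.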
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move blobs from the pre-v0.14.0 cache layout into the current one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )
if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: str = None) -> str:
    """Insert a variant tag before the file extension of a weights filename."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
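# Example: _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"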
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 44
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 44
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
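# Illustrative mapping produced by the rules above (assumed checkpoint key):
# "pretrained.model.blocks.0.attn.proj.weight" -> "dpt.encoder.layer.0.attention.output.dense.weight"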
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original DPT weights into the HuggingFace DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 216
|
import requests
from bs4 import BeautifulSoup
def _a ( SCREAMING_SNAKE_CASE : str = "AAPL" ):
"""simple docstring"""
UpperCamelCase__ : Tuple = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
UpperCamelCase__ : int = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , '''html.parser''' )
UpperCamelCase__ : str = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
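# Caveat: this scrapes Yahoo Finance HTML, so the CSS class above is brittle; e.g.
# stock_price("AAPL") returns the quote as a string only while the page markup matches.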
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 146
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """Img2img pipeline that re-noises an input image at a chosen strength and denoises it with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
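# Hypothetical usage sketch (checkpoint id is a placeholder; the class name above is
# reconstructed from the pipeline's behavior, not confirmed by the source):
# pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# images, timestep = pipe(image=init_image, strength=0.6, num_inference_steps=50, return_dict=False)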
| 357
|
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be multiplied together to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be summed to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
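# Worked examples: multiplicative_persistence(39) -> 3 (39 -> 27 -> 14 -> 4)
# and additive_persistence(199) -> 3 (199 -> 19 -> 10 -> 1).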
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
| 0
|
"""simple docstring"""
g = 9.80665  # standard acceleration due to gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on an object = fluid density * gravity * volume displaced."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
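# Worked example: archimedes_principle(fluid_density=997, volume=0.00052, gravity=9.80665)
# ≈ 5.08 N, the buoyant force on ~0.52 L of displaced fresh water.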
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 69
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
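    # Note: with this _LazyModule pattern, `from transformers.models.bert import BertModel`
    # defers the heavy torch/tf/flax imports until the attribute is first accessed.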
| 69
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BartPho tokenizer, based on SentencePiece with a reduced monolingual fairseq vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
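# Hypothetical usage sketch (model id taken from the pretrained map above):
# tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
# input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]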
| 368
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
        # `_lowercase` is the expected-encoding dict assigned in the fmt:off block above
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
| 336
| 0
|
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be multiplied together to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be summed to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[Any] = """canine"""
def __init__( self , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__=0XE0_00 , lowerCAmelCase__=0XE0_01 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=8 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_2_8 , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Optional[int] =max_position_embeddings
a__ : str =hidden_size
a__ : Optional[Any] =num_hidden_layers
a__ : Tuple =num_attention_heads
a__ : Optional[Any] =intermediate_size
a__ : Optional[int] =hidden_act
a__ : List[Any] =hidden_dropout_prob
a__ : Union[str, Any] =attention_probs_dropout_prob
a__ : Optional[Any] =initializer_range
a__ : Union[str, Any] =type_vocab_size
a__ : Optional[int] =layer_norm_eps
# Character config:
a__ : int =downsampling_rate
a__ : Optional[Any] =upsampling_kernel_size
a__ : Union[str, Any] =num_hash_functions
a__ : Any =num_hash_buckets
a__ : int =local_transformer_stride
| 95
| 1
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex that tracks its neighbors and the weights of its edges."""
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}
    def __lt__(self, other):
        return self.key < other.key
    def __repr__(self):
        return self.id
    def add_neighbor(self, vertex):
        """Record `vertex` as adjacent to this one."""
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight):
        """Store the weight of the edge to `vertex`."""
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    """Add an undirected weighted edge between vertices `a` and `b` (1-indexed)."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a list-based queue; returns the MST edges as 1-indexed pairs."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap; yields the MST edges as 1-indexed pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Placeholder for doctests collected by `doctest.testmod()`."""
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
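    # Illustrative demo (not part of the original file): a triangle graph with
    # edge weights 1, 2, 3; both variants should pick the two cheapest edges.
    demo = [Vertex(i) for i in range(3)]
    connect(demo, 1, 2, 1)
    connect(demo, 2, 3, 2)
    connect(demo, 1, 3, 3)
    print(prim(demo, demo[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(demo, demo[0])))  # [(2, 1), (3, 2)]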
| 70
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Union[str, Any] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
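# A minimal stdlib sketch of the lazy-import idea used above (illustrative
# only; this is not the actual _LazyModule implementation):
#
#     import importlib
#
#     class _Lazy:
#         def __init__(self, module_name):
#             self._name, self._mod = module_name, None
#         def __getattr__(self, attr):
#             if self._mod is None:
#                 self._mod = importlib.import_module(self._name)  # deferred import
#             return getattr(self._mod, attr)
#
#     json_lazy = _Lazy("json")        # nothing imported yet
#     json_lazy.dumps({"lazy": True})  # first attribute access triggers the import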
| 70
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation with the stochastic sampler of Karras et al. (2022)."""
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
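# Illustrative usage (assumes a diffusers install and a checkpoint trained for
# this pipeline; the model id below is only an example):
#
#     pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("sample.png")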
| 4
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration that nests an encoder config and a decoder config."""
    model_type = "encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate from two existing configs, marking the decoder appropriately."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
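# Illustrative composition (assumes transformers' BertConfig is available):
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention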
| 14
| 0
|
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    # `field(default=[])` is illegal for mutable defaults, so wrap it in a factory.
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    """Arguments shared by the benchmark scripts."""
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )
    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models
    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
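# Illustrative instantiation of the arguments above (values are examples;
# constructing the dataclass triggers the deprecation warning in __post_init__):
#
#     args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
#     print(args.to_json_string())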
| 366
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """
        Accept deprecated `no_*` arguments for backward compatibility and translate
        them into their positive counterparts before delegating to the parent class.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]
    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]
    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 21
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        """Instantiate from a text config and a vision config."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
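# Illustrative composition of the three configs above (hypothetical small
# values):
#
#     text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2)
#     vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#     cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     print(cfg.model_type, cfg.text_config.num_layers)  # pix2struct 2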
| 250
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3RT/M) in m/s."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28  # kept from the original example; note the formula expects kg/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 250
| 1
|
lowercase_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")
def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of `message` by the matching key letter; pass non-letters through."""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 362
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating the velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Lorentz boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to `event`; with no event given, transform the symbolic four-vector."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(F'ct\' = {four_vector[0]}')
print(F'x\' = {four_vector[1]}')
print(F'y\' = {four_vector[2]}')
print(F'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'\n{numerical_vector}')
| 224
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 50
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling-hash comparison."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 15
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradients for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    """Pick cuda, mps, or cpu, warning that MPS backpropagation is unreliable."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device
def show_image(image):
    """Display `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    """Current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
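# Minimal demonstration of the helpers above (the tiny Linear layer is just an
# illustrative module; assumes torch is installed):
if __name__ == "__main__":
    layer = torch.nn.Linear(2, 2)
    freeze_module(layer)
    print(all(not p.requires_grad for p in layer.parameters()))  # True
    print(get_device(), get_timestamp())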
| 119
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """Extracts raw waveforms (inputs) and log-mel spectrograms (targets) for SpeechT5."""
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram for one waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
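# Illustrative call (assumes transformers exposes the class above; the
# waveform is random noise, purely for shape checking):
#
#     import numpy as np
#     extractor = SpeechT5FeatureExtractor()
#     wav = np.random.randn(16000).astype(np.float32)
#     feats = extractor(audio=wav, sampling_rate=16000, return_tensors="np")
#     print(feats["input_values"].shape)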
| 119
| 1
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        """Write the pyarrow table to SQL; the caller is responsible for the connection lifecycle."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
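# Illustrative round-trip through the reader/writer above, via the public
# Dataset API (hypothetical sqlite URI):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [1, 2, 3]})
#     ds.to_sql("t", "sqlite:///demo.db")                             # SqlDatasetWriter
#     ds2 = Dataset.from_sql("SELECT * FROM t", "sqlite:///demo.db")  # SqlDatasetReader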
| 80
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the static shape where known, falling back to dynamic dimensions."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon leaves results effectively unchanged but keeps XLA-compiled softmax stable.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a padding mask into an additive bias: 0 where attended, a large negative value where masked."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "input_ids" ) -> None:
tf.debugging.assert_less(
lowerCamelCase , tf.cast(lowerCamelCase , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowerCamelCase )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes to an HDF5 group, chunking anything larger than the header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load attributes from an HDF5 group, reassembling chunked entries."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand any rank-1 tensors in a nested structure to rank 2."""
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
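# Quick illustration of two helpers above (assumes TensorFlow is installed):
#
#     x = tf.zeros((2, 3, 4))
#     shape_list(x)     # [2, 3, 4] -- static dims where known
#     flatten(x, 1, 2)  # shape (2, 12), mirroring torch.flatten(x, 1, 2)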
| 223
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
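# A quick sketch of what the helper produces (toy values, not from the test
# suite): the encoder attention mask unmasks every non-pad position, while the
# decoder mask always unmasks the first (decoder-start) position.
#
#   toy_config = PegasusConfig(pad_token_id=1)
#   toy_ids = tf.constant([[5, 7, 1]])
#   toy_inputs = prepare_pegasus_inputs_dict(toy_config, toy_ids, toy_ids)
#   # toy_inputs["attention_mask"].numpy() -> [[1, 1, 0]]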
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__UpperCamelCase : Union[str, Any] = """google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
_SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M"""
    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input_same_as_parlai_3B(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]

        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
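# Minimal sanity check against nltk directly (assumed pre-tokenized inputs),
# which is exactly what `_compute` delegates to:
#
#   from nltk.translate import gleu_score
#   refs = [[["the", "cat", "sat"]]]
#   hyps = [["the", "cat", "sat"]]
#   gleu_score.corpus_gleu(list_of_references=refs, hypotheses=hyps)  # -> 1.0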
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """Read 4 bytes from bytestream as an unsigned 32-bit integer."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_A, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( _A ):
"""simple docstring"""
print("""Extracting""", f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
__magic_name__ : Optional[Any] = _readaa(_A )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__magic_name__ : int = _readaa(_A )
__magic_name__ : Tuple = _readaa(_A )
__magic_name__ : str = _readaa(_A )
__magic_name__ : Any = bytestream.read(rows * cols * num_images )
__magic_name__ : List[str] = numpy.frombuffer(_A, dtype=numpy.uinta )
__magic_name__ : List[str] = data.reshape(_A, _A, _A, 1 )
return data
@deprecated(_A, """Please use tf.one_hot on tensors.""" )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Any = labels_dense.shape[0]
__magic_name__ : Union[str, Any] = numpy.arange(_A ) * num_classes
__magic_name__ : str = numpy.zeros((num_labels, num_classes) )
__magic_name__ : Optional[Any] = 1
return labels_one_hot
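# Tiny worked example (toy labels): the `flat` write places a single 1 per row
# at position row_index * num_classes + class_index.
#
#   _dense_to_one_hot(numpy.array([0, 2]), num_classes=3)
#   # array([[1., 0., 0.],
#   #        [0., 0., 1.]])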
@deprecated(_A, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( _A, _A=False, _A=10 ):
"""simple docstring"""
print("""Extracting""", f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
__magic_name__ : List[str] = _readaa(_A )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__magic_name__ : Tuple = _readaa(_A )
__magic_name__ : Union[str, Any] = bytestream.read(_A )
__magic_name__ : Dict = numpy.frombuffer(_A, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A, _A )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
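# Usage sketch (assumes `images` of shape (N, 28, 28, 1) with matching
# `labels`): batches are drawn sequentially, the data is reshuffled once per
# epoch, and an epoch boundary is stitched from the tail and head slices above.
#
#   ds = _DataSet(images, labels)   # reshape=True flattens images to (N, 784)
#   xs, ys = ds.next_batch(32)      # xs: (32, 784) float32 in [0.0, 1.0]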
@deprecated(_A, """Please write your own downloading logic.""" )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if not gfile.Exists(_A ):
gfile.MakeDirs(_A )
__magic_name__ : Optional[int] = os.path.join(_A, _A )
if not gfile.Exists(_A ):
urllib.request.urlretrieve(_A, _A ) # noqa: S310
with gfile.GFile(_A ) as f:
__magic_name__ : Any = f.size()
print("""Successfully downloaded""", _A, _A, """bytes.""" )
return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
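# End-to-end usage sketch (the directory is arbitrary; the first call
# downloads the four MNIST archives from `source_url`):
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_xs, batch_ys = mnist.train.next_batch(100)
#   mnist.validation.num_examples  # 5000 with the default validation_size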
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
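# The offloading test above follows a common CUDA peak-memory measurement
# pattern; a standalone sketch of the same idea (`run_pipeline` is a
# hypothetical stand-in for any GPU workload):
#
#   torch.cuda.reset_peak_memory_stats()
#   _ = run_pipeline()
#   peak_bytes = torch.cuda.max_memory_allocated()  # high-water mark in bytes
#   assert peak_bytes < 7 * 10**9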
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
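# Why the seeded inputs keep the expected slices above reproducible: reseeding
# the global generator recreates identical random tensors, e.g.
#
#   torch.manual_seed(13); a = torch.randn((1, 768))
#   torch.manual_seed(13); b = torch.randn((1, 768))
#   assert torch.equal(a, b)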
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))
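    # Shape intuition for the check above: with image_size=32 and
    # output_stride=32, the final feature map is 32 // 32 = 1 pixel per side,
    # so last_hidden_state comes out as (batch, last_hidden_size, 1, 1).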
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = model.to(__A )
UpperCAmelCase : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : int = image_processor(images=__A, return_tensors='''pt''' ).to(__A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Optional[Any] = outputs.logits
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape, __A )
UpperCAmelCase : Tuple = torch.tensor(
[
[[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
[[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
[[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
], device=__A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], __A, atol=1E-4 ) )
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCamelCase ( lowercase ):
def _lowercase (self : Any) -> List[Any]:
__snake_case : Any = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_A , 'hidden_sizes'))
self.parent.assertTrue(hasattr(_A , 'num_attention_heads'))
self.parent.assertTrue(hasattr(_A , 'num_encoder_blocks'))
class UpperCamelCase :
def __init__(self : Optional[int] , _A : Any , _A : str=13 , _A : List[str]=64 , _A : List[Any]=3 , _A : Any=4 , _A : List[str]=[2, 2, 2, 2] , _A : Tuple=[8, 4, 2, 1] , _A : List[str]=[16, 32, 64, 1_28] , _A : int=[1, 4, 8, 16] , _A : List[str]=[1, 2, 4, 8] , _A : Dict=True , _A : Any=True , _A : List[str]="gelu" , _A : Optional[int]=0.1 , _A : Union[str, Any]=0.1 , _A : List[Any]=0.02 , _A : str=3 , _A : int=None , ) -> List[Any]:
__snake_case : int = parent
__snake_case : List[Any] = batch_size
__snake_case : Optional[int] = image_size
__snake_case : List[str] = num_channels
__snake_case : Any = num_encoder_blocks
__snake_case : Dict = sr_ratios
__snake_case : Any = depths
__snake_case : Tuple = hidden_sizes
__snake_case : Tuple = downsampling_rates
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Optional[int] = is_training
__snake_case : Any = use_labels
__snake_case : List[Any] = hidden_act
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : List[Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Dict = scope
def _lowercase (self : List[Any]) -> Tuple:
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case : str = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
__snake_case : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase (self : Any) -> Optional[int]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase (self : List[str] , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]) -> int:
__snake_case : Union[str, Any] = SegformerModel(config=_A)
model.to(_A)
model.eval()
__snake_case : str = model(_A)
__snake_case : List[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def _lowercase (self : Tuple , _A : Dict , _A : Any , _A : int) -> str:
__snake_case : Any = self.num_labels
__snake_case : List[str] = SegformerForSemanticSegmentation(_A)
model.to(_A)
model.eval()
__snake_case : Dict = model(_A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
__snake_case : Dict = model(_A , labels=_A)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def _lowercase (self : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str) -> List[Any]:
__snake_case : List[Any] = 1
__snake_case : str = SegformerForSemanticSegmentation(config=_A)
model.to(_A)
model.eval()
__snake_case : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(_A)
__snake_case : Any = model(_A , labels=_A)
self.parent.assertGreater(result.loss , 0.0)
def _lowercase (self : Any) -> Optional[int]:
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : str = config_and_inputs
__snake_case : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase , lowercase , unittest.TestCase ):
UpperCAmelCase : List[str] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Dict = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : List[Any] = False
def _lowercase (self : str) -> Union[str, Any]:
__snake_case : Optional[int] = SegformerModelTester(self)
__snake_case : Any = SegformerConfigTester(self , config_class=_A)
def _lowercase (self : List[str]) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase (self : List[Any]) -> List[str]:
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def _lowercase (self : Optional[int]) -> str:
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_A)
def _lowercase (self : int) -> Union[str, Any]:
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_A)
@unittest.skip('SegFormer does not use inputs_embeds')
def _lowercase (self : Union[str, Any]) -> str:
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
def _lowercase (self : int) -> str:
pass
def _lowercase (self : List[str]) -> Any:
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(_A)
__snake_case : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A)
def _lowercase (self : List[str]) -> List[Any]:
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = True
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : int = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(_A , _A))
__snake_case : Union[str, Any] = outputs.attentions
__snake_case : int = sum(self.model_tester.depths)
self.assertEqual(len(_A) , _A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : int = True
__snake_case : Union[str, Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(_A , _A))
__snake_case : Optional[int] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Optional[int] = (self.model_tester.image_size // 4) ** 2
__snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case : int = (self.model_tester.image_size // 32) ** 2
__snake_case : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case : int = len(_A)
# Check attention is always last and order is fine
__snake_case : Any = True
__snake_case : Tuple = True
__snake_case : Optional[Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(_A , _A))
self.assertEqual(out_len + 1 , len(_A))
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Any = (self.model_tester.image_size // 4) ** 2
__snake_case : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowercase (self : str) -> List[str]:
def check_hidden_states_output(_A : Union[str, Any] , _A : List[str] , _A : Tuple):
__snake_case : Tuple = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(_A , _A))
__snake_case : List[str] = outputs.hidden_states
__snake_case : Tuple = self.model_tester.num_encoder_blocks
self.assertEqual(len(_A) , _A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Any = True
check_hidden_states_output(_A , _A , _A)
def _lowercase (self : Optional[int]) -> int:
if not self.model_tester.is_training:
return
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = True
for model_class in self.all_model_classes:
if model_class in get_values(_A):
continue
__snake_case : Tuple = model_class(_A)
model.to(_A)
model.train()
__snake_case : str = self._prepare_for_class(_A , _A , return_labels=_A)
__snake_case : Dict = model(**_A).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowercase (self : Tuple) -> Dict:
pass
@slow
def _lowercase (self : Any) -> List[str]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = SegformerModel.from_pretrained(_A)
self.assertIsNotNone(_A)
def __UpperCAmelCase ( ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase (self : Union[str, Any]) -> Any:
# only resize + normalize
__snake_case : List[str] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_A , align=_A , do_random_crop=_A)
__snake_case : Tuple = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
_A)
__snake_case : Optional[Any] = prepare_img()
__snake_case : Tuple = image_processor(images=_A , return_tensors='pt')
__snake_case : List[str] = encoded_inputs.pixel_values.to(_A)
with torch.no_grad():
__snake_case : Any = model(_A)
__snake_case : Optional[int] = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , _A)
__snake_case : Tuple = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
]).to(_A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _A , atol=1E-4))
@slow
def _lowercase (self : Any) -> Optional[int]:
# only resize + normalize
__snake_case : int = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_A , align=_A , do_random_crop=_A)
__snake_case : Tuple = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(_A)
__snake_case : str = prepare_img()
__snake_case : Union[str, Any] = image_processor(images=_A , return_tensors='pt')
__snake_case : str = encoded_inputs.pixel_values.to(_A)
with torch.no_grad():
__snake_case : Any = model(_A)
__snake_case : Any = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , _A)
__snake_case : List[Any] = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
]).to(_A)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _A , atol=1E-1))
@slow
def _lowercase (self : Optional[int]) -> Union[str, Any]:
# only resize + normalize
__snake_case : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_A , align=_A , do_random_crop=_A)
__snake_case : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
_A)
__snake_case : Optional[Any] = prepare_img()
__snake_case : Optional[Any] = image_processor(images=_A , return_tensors='pt')
__snake_case : Union[str, Any] = encoded_inputs.pixel_values.to(_A)
with torch.no_grad():
__snake_case : Any = model(_A)
__snake_case : Optional[Any] = outputs.logits.detach().cpu()
__snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(5_00, 3_00)])
__snake_case : Any = torch.Size((5_00, 3_00))
self.assertEqual(segmentation[0].shape , _A)
__snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=_A)
__snake_case : str = torch.Size((1_28, 1_28))
self.assertEqual(segmentation[0].shape , _A)
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
while a != 0:
__snake_case , __snake_case : Union[str, Any] = b % a, a
return b
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) != 1:
__snake_case : Union[str, Any] = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(UpperCAmelCase_ )
__snake_case , __snake_case , __snake_case : List[str] = 1, 0, a
__snake_case , __snake_case , __snake_case : Dict = 0, 1, m
while va != 0:
__snake_case : List[str] = ua // va
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
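# Illustrative sanity check (not part of the original module): 7 * 15 = 105 = 4 * 26 + 1,
# so the inverse of 7 modulo 26 is 15.
if __name__ == "__main__":
    assert gcd(7, 26) == 1
    assert mod_inverse(7, 26) == 15
    assert (7 * mod_inverse(7, 26)) % 26 == 1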
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertJapaneseTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
super().setUp()
a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
a = '''こんにちは、世界。 \nこんばんは、世界。'''
a = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a , a = self.get_input_output_texts(__UpperCAmelCase )
a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
a = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
return text, ids
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(__UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(__UpperCAmelCase )
a = '''こんにちは、世界。\nこんばんは、世界。'''
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(__UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , '''rb''' ) as handle:
a = pickle.load(__UpperCAmelCase )
a = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
try:
a = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
try:
a = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
a = MecabTokenizer(do_lower_case=__UpperCAmelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
try:
a = MecabTokenizer(
do_lower_case=__UpperCAmelCase , normalize_text=__UpperCAmelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
a = MecabTokenizer(normalize_text=__UpperCAmelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(__UpperCAmelCase )
a = '''こんにちは、世界。\nこんばんは、世界。'''
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(__UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , '''rb''' ) as handle:
a = pickle.load(__UpperCAmelCase )
a = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_sudachi
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
a = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
a = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
a = SudachiTokenizer(do_lower_case=__UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
a = SudachiTokenizer(normalize_text=__UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
a = SudachiTokenizer(trim_whitespace=__UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(__UpperCAmelCase )
a = '''こんにちは、世界。\nこんばんは、世界。'''
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(__UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(__UpperCAmelCase , '''rb''' ) as handle:
a = pickle.load(__UpperCAmelCase )
a = tokenizer_new.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@require_jumanpp
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
a = JumanppTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
a = JumanppTokenizer(normalize_text=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
a = JumanppTokenizer(trim_whitespace=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
a = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
a = {}
for i, token in enumerate(__UpperCAmelCase ):
a = i
a = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
a = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
a = tokenizer.subword_tokenizer
a = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(__UpperCAmelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
a = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(__UpperCAmelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
a = tokenizer.encode('''ありがとう。''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertJapaneseTokenizer
__snake_case = False
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
super().setUp()
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : List[str] , **__UpperCAmelCase : str ) ->List[Any]:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) ->int:
"""simple docstring"""
a = '''こんにちは、世界。 \nこんばんは、世界。'''
a = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
pass # TODO add if relevant
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
a = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
__UpperCAmelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
a = {}
for i, token in enumerate(__UpperCAmelCase ):
a = i
a = CharacterTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
a = tokenizer.encode('''ありがとう。''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
a = '''cl-tohoku/bert-base-japanese'''
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
a = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(__UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
a = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
def __snake_case (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case (self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Any = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return model
@property
def __snake_case (self ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Optional[int] = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), cross_attention_dim=10, )
return model
@property
def __snake_case (self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_: Optional[int] = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D"""), up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D"""), )
UpperCAmelCase_: Optional[Any] = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return vqvae, unet
@slow
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: str = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
UpperCAmelCase_: Tuple = DDPMScheduler()
UpperCAmelCase_: List[Any] = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE_, unet=self.dummy_unet, mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: str = pipe(generator=SCREAMING_SNAKE_CASE_, steps=4 )
UpperCAmelCase_: Optional[Any] = output.audios[0]
UpperCAmelCase_: Optional[int] = output.images[0]
UpperCAmelCase_: Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Dict = pipe(generator=SCREAMING_SNAKE_CASE_, steps=4, return_dict=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_: Optional[Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: List[Any] = np.frombuffer(image_from_tuple.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Dict = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: int = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
UpperCAmelCase_: List[str] = DDIMScheduler()
UpperCAmelCase_: int = self.dummy_vqvae_and_unet
UpperCAmelCase_: Dict = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
UpperCAmelCase_: Dict = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_: List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Dict = pipe(raw_audio=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, start_step=5, steps=10 )
UpperCAmelCase_: Any = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_: Union[str, Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Union[str, Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: Union[str, Any] = self.dummy_unet_condition
UpperCAmelCase_: Union[str, Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=SCREAMING_SNAKE_CASE_, mel=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
np.random.seed(0 )
UpperCAmelCase_: List[str] = torch.rand((1, 1, 10) )
UpperCAmelCase_: Optional[int] = pipe(generator=SCREAMING_SNAKE_CASE_, encoding=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = output.images[0]
UpperCAmelCase_: Any = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def __snake_case (self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: str = torch_device
UpperCAmelCase_: Any = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
UpperCAmelCase_: Dict = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(42 )
UpperCAmelCase_: Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = output.audios[0]
UpperCAmelCase_: Optional[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_: Optional[Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Any = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = BertTokenizer
__snake_case = BertTokenizerFast
__snake_case = True
__snake_case = True
__snake_case = filter_non_english
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''UNwant\u00E9d,running'''
lowerCAmelCase_ = '''unwanted, running'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ), [9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = '''UNwant\u00E9d,running'''
lowerCAmelCase_ = tokenizer.tokenize(UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
# With lower casing
lowerCAmelCase_ = self.get_tokenizer(do_lower_case=UpperCamelCase__ )
lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=UpperCamelCase__ )
lowerCAmelCase_ = '''UNwant\u00E9d,running'''
lowerCAmelCase_ = tokenizer.tokenize(UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = tokenizer.encode(UpperCamelCase__ )
lowerCAmelCase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ), ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__, strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''h\u00E9llo'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__, strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ), ['''hello'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__, strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__, strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ), ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer(do_lower_case=UpperCamelCase__, never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = BasicTokenizer()
lowerCAmelCase_ = '''a\n\'ll !!to?\'d of, can\'t.'''
lowerCAmelCase_ = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(UpperCamelCase__ ), UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCAmelCase_ = {}
for i, token in enumerate(UpperCamelCase__ ):
lowerCAmelCase_ = i
lowerCAmelCase_ = WordpieceTokenizer(vocab=UpperCamelCase__, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ), ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ), ['''[UNK]''', '''runn''', '''##ing'''] )
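# Note (illustrative, not part of the original tests): WordpieceTokenizer performs
# greedy longest-match-first segmentation. It repeatedly takes the longest prefix of
# the remaining characters that exists in the vocab (pieces after the first carry a
# "##" prefix); if at any point no prefix matches, the entire word becomes unk_token,
# which is why "unwantedX" above collapses to "[UNK]" while "running" still splits.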
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']], [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']], [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase_ = tokenizer.encode('''sequence builders''', add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.encode('''multi-sequence build''', add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__, UpperCamelCase__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(
UpperCamelCase__, return_attention_mask=UpperCamelCase__, return_token_type_ids=UpperCamelCase__, return_offsets_mapping=UpperCamelCase__, add_special_tokens=UpperCamelCase__, )
lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__, '''do_lower_case''' ) else False
lowerCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results], tokens['''offset_mapping'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ['''的''', '''人''', '''有''']
lowerCAmelCase_ = ''''''.join(UpperCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_p.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_r.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_r.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_p.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ )
]
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
import string
def decrypt(message: str) -> None:
    '''Brute-force a Caesar cipher: print the message decrypted under every possible key.'''
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
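# Illustrative example (not in the original script): decrypt("KHOOR ZRUOG")
# prints all 26 candidate plaintexts; the line for Key #3 reads
# "Decryption using Key #3: HELLO WORLD", since each letter shifts back three places.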
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
SCREAMING_SNAKE_CASE :Dict = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
SCREAMING_SNAKE_CASE :List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
SCREAMING_SNAKE_CASE :Dict = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
SCREAMING_SNAKE_CASE :Union[str, Any] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
SCREAMING_SNAKE_CASE :List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) ,homepage="https://github.com/openai/human-eval" ,codebase_urls=["https://github.com/openai/human-eval"] ,reference_urls=["https://github.com/openai/human-eval"] ,license=_LICENSE ,)
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
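# Hand-check of the estimator above (illustrative values): with n = 2 generated
# samples of which c = 1 passes, pass@1 = 1 - (1 - 1/2) = 0.5, matching the
# `pass@1: 0.5` shown in the docstring example.
#   estimate_pass_at_k(np.array([2]), np.array([1]), 1)  # -> array([0.5])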
| 15
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
| 21
| 0
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input length."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
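# Minimal usage sketch (illustrative; any seq2seq checkpoint works, and the task
# name selects one of the pipeline classes defined above):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   print(summarizer("A very long article ...", max_length=60, min_length=10))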
| 371
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solves Ax = b iteratively for a strictly diagonally dominant matrix A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
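# A minimal usage sketch (illustrative values): solve the strictly diagonally
# dominant system 4x + y = 1, x + 3y = 2, whose exact solution is [1/11, 7/11].
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], iterations=25))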
| 185
| 0
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    # in camelCase the first word stays lowercase; in PascalCase it is capitalized too
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
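# Examples:
#   snake_to_camel_case("some_random_string")                   -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True)  -> "SomeRandomString"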
| 33
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
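# Usage sketch (the checkpoint name is an assumption; substitute any compatible one):
#   from PIL import Image
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")  # {"pixel_values": ...}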
| 114
| 0
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes a function return its wall-clock duration in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
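# Usage sketch (illustrative feature spec): write 100 dummy rows to an Arrow file,
# optionally timing callers with the @get_duration decorator defined above.
#   features = datasets.Features({"text": datasets.Value("string"), "img": datasets.Array2D((2, 2), "float32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)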
| 367
|
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    # Read the message column by column: column `col` collects every key-th symbol.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
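# Round-trip sanity check (my own example): decryption inverts encryption for any
# valid key, e.g. decrypt_message(6, encrypt_message(6, "Common sense is not so common."))
# returns the original message.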
| 126
| 0
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
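# Example invocation (illustrative flags; the script file name and dataset config
# are assumptions, and --output_dir/--do_train/--do_eval come from TrainingArguments):
#   python run_common_voice.py \
#       --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
#       --dataset_config_name="tr" \
#       --output_dir=./wav2vec2-xlsr-demo \
#       --do_train --do_eval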
| 62
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
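# Note: with the lazy pattern above, `from transformers.models.sew import SEWModel`
# resolves the modeling symbols only on first attribute access, so importing the
# package does not require torch up front.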
| 62
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280
|
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
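# Quick illustration (my own example): searching the full index range returns the
# maximum, e.g. find_max([1, 5, 3, 9, 2], 0, 4) -> 9.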
| 280
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotSmallHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 27
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
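

# Added sanity check (a minimal sketch): cross-check the closed-form result
# against a brute-force computation for a small n.
assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11))  # both equal 2640
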
if __name__ == "__main__":
print(F'''{solution() = }''')
| 223
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of the words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for state in total_list:
        print(*state)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
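
# For n = 4 and k = 2 the demo above prints the six 2-combinations of {1, 2, 3, 4}:
# 1 2, 1 3, 1 4, 2 3, 2 4, 3 4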
| 335
|
"""simple docstring"""
import math
class Graph:
    def __init__(self, n: int = 0):  # a graph with Node 0, 1, ..., n-1
        self.n = n
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]  # adjacency matrix for weight
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int) -> float:
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
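
# Sanity note (added): for the example graph above the shortest distances are
# dist(1, 4) = 11 (via 1 -> 3 -> 4) and dist(0, 3) = 16 (via 0 -> 2 -> 3).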
| 335
| 1
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(f"Burrows Wheeler transform for string '{s}' results in '{result['bwt_string']}'")
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' we get original string '{original_string}'")
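
# Worked example (added): bwt_transform("banana") returns
# {"bwt_string": "nnbaaa", "idx_original_string": 3}, and reverse_bwt("nnbaaa", 3)
# recovers "banana".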
| 127
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 127
| 1
|
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class A(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 278
|
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of the given integers."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
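

# Example usage (added sketch) for the function above:
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # -> 6, from the subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))  # -> 0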
| 278
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
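
# Minimal usage sketch (added; assumes transformers exposes Swin2SRConfig as above):
# config = Swin2SRConfig(upscale=4)
# config.num_layers == len(config.depths)  # num_layers is derived from depths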
| 144
| 0
|
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_x_y, max_x_y):
    return (
        clamp(rect[0], min_x_y[0], max_x_y[0]),
        clamp(rect[1], min_x_y[1], max_x_y[1]),
        clamp(rect[2], min_x_y[0], max_x_y[0]),
        clamp(rect[3], min_x_y[1], max_x_y[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop((slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0))
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # round n down to the largest multiple of d, e.g. next_divisible(139, 8) -> 136
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level)

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(make_transparency_mask((upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="L")
        final_image.paste(upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)

    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents)
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 265
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
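
# Example (added): remove_last_block("x = 1\ndef f():\n    return x\nclass Y:")
# returns "x = 1\ndef f():\n    return x" - the trailing, unfinished block the
# model started ("class Y:") is stripped along with its separator.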
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(references=references, predictions=generations, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
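
# Example invocation (added sketch; the exact flags live in HumanEvalArguments and
# may differ from this guess):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --do_sample True --batch_size 10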
| 230
| 0
|
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 354
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 293
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
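
# Minimal usage sketch (added): the attribute_map lets generic code read mapped names.
# config = OpenAIGPTConfig()
# config.hidden_size  # resolves to config.n_embd == 768 via attribute_map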
| 241
|
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
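
# Note (added): __judge_point implements Conway's Game of Life rules - a live cell
# survives with exactly 2 or 3 live neighbours, a dead cell becomes alive with
# exactly 3 live neighbours, and every other cell ends up dead.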
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 58
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCAmelCase : Any = logging.getLogger(__name__)
_UpperCAmelCase : Tuple = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCAmelCase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowercase :
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE )} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase :
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The input training data file (a text file)."} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
__lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether ot not to use whole word mask."} )
__lowercase : float = field(
default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
__lowercase : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
__lowercase : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
__lowercase : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( lowercase , lowercase , lowercase = False , lowercase = None , ) -> Optional[int]:
'''simple docstring'''
def _dataset(lowercase , lowercase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=lowercase , file_path=lowercase , block_size=args.block_size , ref_path=lowercase , )
return LineByLineTextDataset(tokenizer=lowercase , file_path=lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase , file_path=lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
UpperCamelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase = AutoModelWithLMHead.from_config(lowercase )
model.resize_token_embeddings(len(lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '
            '--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
UpperCamelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase = (
get_dataset(lowercase , tokenizer=lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase = (
get_dataset(lowercase , tokenizer=lowercase , evaluate=lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase = DataCollatorForWholeWordMask(
tokenizer=lowercase , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase = DataCollatorForLanguageModeling(
tokenizer=lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase = Trainer(
model=lowercase , args=lowercase , data_collator=lowercase , train_dataset=lowercase , eval_dataset=lowercase , prediction_loss_only=lowercase , )
# Training
if training_args.do_train:
UpperCamelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCamelCase = trainer.evaluate()
UpperCamelCase = math.exp(eval_output['eval_loss'] )
UpperCamelCase = {'perplexity': perplexity}
UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , lowercase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(lowercase )
return results
def A ( lowercase ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
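# Illustrative footnote, not part of the original example script: the eval step
# above reports perplexity as the exponential of the mean cross-entropy loss.
# With a hypothetical eval_loss of 3.21:
#
#     import math
#     perplexity = math.exp(3.21)  # ~24.8, i.e. roughly as uncertain as a
#                                  # uniform choice over ~25 tokens per step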
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ = 768 , ) -> List[Any]:
"""simple docstring"""
super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , A_ ) )
        self.std = nn.Parameter(torch.ones(1 , A_ ) )
    def __UpperCamelCase ( self , torch_device = None , torch_dtype = None , ) -> Any:
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def __UpperCamelCase ( self , embeds ) -> Dict:
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def __UpperCamelCase ( self , embeds ) -> Dict:
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
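if __name__ == "__main__":
    # Round-trip sketch (ours): the two methods above are exact inverses.
    # Randomized parameters are used because the registered defaults
    # (zero mean, unit std) would make the check trivial.
    embeds = torch.randn(4, 768)
    mean, std = torch.randn(1, 768), torch.rand(1, 768) + 0.5
    scaled = (embeds - mean) * 1.0 / std  # what the scale method computes
    restored = (scaled * std) + mean      # what the unscale method undoes
    assert torch.allclose(embeds, restored, atol=1e-5)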
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
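    # Quick sanity checks of the 6k +/- 1 trial division above (illustrative):
    assert is_prime(2) and is_prime(3) and is_prime(97)
    assert not is_prime(1) and not is_prime(91)  # 91 = 7 * 13
    assert solution(6) == 13  # the sixth prime: 2, 3, 5, 7, 11, 13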
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _snake_case :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=0 , ):
__magic_name__ : Optional[Any] = parent
__magic_name__ : Dict = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : Any = is_training
__magic_name__ : List[Any] = use_input_mask
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : List[str] = use_labels
__magic_name__ : Dict = vocab_size
__magic_name__ : Any = hidden_size
__magic_name__ : Tuple = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Any = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = max_position_embeddings
__magic_name__ : Optional[Any] = type_vocab_size
__magic_name__ : Any = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : int = num_choices
__magic_name__ : List[Any] = scope
__magic_name__ : Dict = projection_dim
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Optional[Any] = None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = None
if self.use_labels:
__magic_name__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : List[str] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
__magic_name__ : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ):
__magic_name__ : List[Any] = TFDPRContextEncoder(config=_a )
__magic_name__ : List[str] = model(_a , attention_mask=_a , token_type_ids=_a )
__magic_name__ : List[str] = model(_a , token_type_ids=_a )
__magic_name__ : Dict = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ):
__magic_name__ : Any = TFDPRQuestionEncoder(config=_a )
__magic_name__ : str = model(_a , attention_mask=_a , token_type_ids=_a )
__magic_name__ : int = model(_a , token_type_ids=_a )
__magic_name__ : Optional[Any] = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a , _a ):
__magic_name__ : Optional[Any] = TFDPRReader(config=_a )
__magic_name__ : Dict = model(_a , attention_mask=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = TFDPRModelTester(self )
__magic_name__ : Dict = ConfigTester(self , config_class=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TFDPRQuestionEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = TFDPRReader.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
__magic_name__ : int = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
__magic_name__ : Dict = model(_a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__magic_name__ : List[str] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
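def _toy_retrieval_sketch():
    # Context note (ours, not part of the original tests): DPR question and
    # context embeddings are compared by inner product for retrieval. The
    # random vectors below are hypothetical stand-ins for encoder outputs.
    import numpy as np

    question = np.random.rand(768).astype("float32")
    passages = np.random.rand(10, 768).astype("float32")
    scores = passages @ question  # one inner-product score per passage
    return int(np.argmax(scores))  # index of the top-ranked passage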
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[Any] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'roformer'
    def __init__( self , vocab_size=50_000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _snake_case ( snake_case ):
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.task == "multiple-choice":
__magic_name__ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
__magic_name__ : str = {0: "batch", 1: "sequence"}
__magic_name__ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
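# Background sketch (ours, not part of this configuration module): RoFormer's
# key idea is the rotary position embedding (RoPE), applied to query/key vectors.
def _rope_sketch(x, base: float = 10000.0):
    # Rotate each (even, odd) pair of features of the float tensor x, of shape
    # (..., seq_len, dim) with even dim, by a position-dependent angle.
    import torch

    seq_len, dim = x.shape[-2], x.shape[-1]
    pos = torch.arange(seq_len, dtype=x.dtype).unsqueeze(-1)
    freq = base ** (-torch.arange(0, dim, 2, dtype=x.dtype) / dim)
    angle = pos * freq  # (seq_len, dim / 2)
    sin, cos = angle.sin(), angle.cos()
    out = torch.empty_like(x)
    out[..., 0::2] = x[..., 0::2] * cos - x[..., 1::2] * sin
    out[..., 1::2] = x[..., 0::2] * sin + x[..., 1::2] * cos
    return out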
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class __lowerCAmelCase :
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = after_output[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
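    # Worked example of the seq_len arithmetic checked above (illustrative):
    # a 224x224 ViT with 16x16 patches yields (224 // 16) * (224 // 16) = 196
    # patches, so seq_len = 196 + 1 = 197 once the [CLS] token is prepended
    # (the DeiT variant below adds 2: [CLS] plus a distillation token).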
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case , _snake_case , F'Difference between torch and flax is {diff} (>= {tol}).' )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs()
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = after_outputs[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFViTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFRobertaModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFCLIPVisionModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_snake_case )
_lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_snake_case , padding=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCAmelCase = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1e-3 ) )
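        # Illustrative follow-up (ours, not in the original test): softmax over
        # the text axis turns such logits into per-image probabilities.
        expected = np.array([[1.228_4727, 0.310_4122]])
        probs = np.exp(expected) / np.exp(expected).sum(-1, keepdims=True)
        # probs[0, 0] is ~0.71: the cat photo matches "una foto di un gatto".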
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : List[str] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCAmelCase__ : List[str] = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : Any = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCAmelCase__ : int = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCAmelCase__ : int = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Any = dct.pop(__UpperCAmelCase )
lowerCAmelCase__ : Any = val
def lowercase_ ( __UpperCAmelCase ) -> int:
if "handwritten" in checkpoint_url:
lowerCAmelCase__ : Tuple = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowerCAmelCase__ : int = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Any = ViTConfig(image_size=384 , qkv_bias=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCAmelCase__ : List[str] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : Tuple = 4096
lowerCAmelCase__ : Optional[Any] = 24
lowerCAmelCase__ : Tuple = 16
lowerCAmelCase__ : List[str] = 1024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[str] = """relu"""
lowerCAmelCase__ : Dict = 1024
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[Any] = False
# load HuggingFace model
lowerCAmelCase__ : Tuple = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase )
lowerCAmelCase__ : int = TrOCRForCausalLM(__UpperCAmelCase )
lowerCAmelCase__ : Any = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
lowerCAmelCase__ : Union[str, Any] = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="""cpu""" , check_hash=__UpperCAmelCase )["""model"""]
lowerCAmelCase__ : List[Any] = create_rename_keys(__UpperCAmelCase , __UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCAmelCase__ : int = state_dict.pop(__UpperCAmelCase )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowerCAmelCase__ : Optional[Any] = val
else:
lowerCAmelCase__ : int = val
# load state dict
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image
lowerCAmelCase__ : Any = ViTImageProcessor(size=encoder_config.image_size )
lowerCAmelCase__ : Optional[Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowerCAmelCase__ : List[str] = TrOCRProcessor(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Dict = processor(images=prepare_img(__UpperCAmelCase ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowerCAmelCase__ : str = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCAmelCase__ : List[str] = model(pixel_values=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
lowerCAmelCase__ : str = outputs.logits
    lowerCAmelCase__ : Union[str, Any] = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCAmelCase__ : Optional[int] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
lowerCAmelCase__ : int = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __UpperCAmelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_A = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
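def _ocr_usage_sketch(dump_folder):
    # Usage sketch (ours, not part of the conversion script): run OCR with a
    # converted checkpoint. `dump_folder` is the --pytorch_dump_folder_path
    # used above; the image path is a hypothetical handwriting sample.
    processor = TrOCRProcessor.from_pretrained(dump_folder)
    model = VisionEncoderDecoderModel.from_pretrained(dump_folder)
    image = Image.open("handwritten_line.jpg").convert("RGB")  # hypothetical path
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]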
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=_UpperCAmelCase ):
    _lowerCamelCase = ['flax', 'transformers']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class lowercase_ ( metaclass=_UpperCAmelCase ):
    _lowerCamelCase = ['flax', 'transformers']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class lowercase_ ( metaclass=_UpperCAmelCase ):
    _lowerCamelCase = ['flax', 'transformers']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class lowercase_ ( metaclass=_UpperCAmelCase ):
    _lowerCamelCase = ['flax', 'transformers']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
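# Behaviour note (ours): because requires_backends runs both in __init__ and in
# the classmethods above, these dummies import cleanly but raise an ImportError
# naming the missing backends ("flax", "transformers") the moment anyone tries
# to instantiate or load one of them.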
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'M-CLIP'
    def __init__( self , transformerDimSize=1_024 , imageDimSize=768 , **lowercase_ ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**lowercase_ )
class lowercase_ ( __snake_case ):
_lowerCamelCase = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def UpperCamelCase ( self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
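def _masked_mean_pooling_sketch():
    # Standalone sketch (ours): the forward pass above is masked mean pooling
    # over the sequence axis followed by a linear projection. The tensors here
    # are hypothetical stand-ins for transformer outputs.
    embs = torch.randn(2, 5, 1_024)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)
    return summed / attention_mask.sum(dim=1)[:, None]  # (batch, hidden)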