code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCAmelCase_ ( A ):
    """Strip fairseq bookkeeping entries from a checkpoint state dict, in place.

    Args:
        A: the fairseq ``state_dict`` (mutated in place).
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        # pop with a default so keys absent from this checkpoint are skipped
        A.pop(k, None)
def UpperCAmelCase_ ( A ):
    """Rename fairseq layer keys to the transformers naming scheme, in place.

    ``transformer_layers`` becomes ``layers`` and ``subsample`` becomes ``conv``
    (the Speech2Text convention).

    NOTE(review): the rename targets were lost in this file's history (the
    popped values were discarded); restored from the canonical HF Speech2Text
    conversion script — verify against upstream.
    """
    keys = list(A.keys())
    for key in keys:
        if "transformer_layers" in key:
            A[key.replace("transformer_layers", "layers")] = A.pop(key)
        elif "subsample" in key:
            A[key.replace("subsample", "conv")] = A.pop(key)
def UpperCAmelCase_ ( A ):
    """Build a bias-free ``nn.Linear`` lm head whose weights alias embedding ``A``.

    Args:
        A: an ``nn.Embedding`` whose weight is shared with the returned layer.

    Returns:
        ``nn.Linear`` with ``weight.data`` rebound to ``A.weight.data``.
    """
    vocab_size, emb_size = A.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Rebind .data directly so the head shares storage with the embedding.
    lin_layer.weight.data = A.weight.data
    return lin_layer
def UpperCAmelCase_ ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert a fairseq Speech2Text checkpoint into a transformers model dir.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint file.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    NOTE(review): the helper calls ``remove_ignore_keys_``/``rename_keys``/
    ``make_linear_from_emb`` follow the original script, but in this file those
    helpers are all mangled to ``UpperCAmelCase_`` — verify the wiring.
    """
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    # Keep the lm head weights aside before stripping/renaming the state dict.
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = SpeechaTextForConditionalGeneration(config)
    # Positional embeddings are generated on the fly, so they may be missing.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the fairseq -> transformers conversion.
    UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    UpperCAmelCase_ : Any = parser.parse_args()
    # NOTE(review): the parser/args are assigned to `UpperCAmelCase_` but used as
    # `parser`/`args`, and `convert_fairseq_sat_checkpoint_to_tfms` is not
    # defined above (the conversion function is named `UpperCAmelCase_`) —
    # these names look mangled; verify against the original script.
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 120 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and build train/test splits for the k-NN demo below.
# NOTE(review): every assignment target here was mangled to `_A`, so the names
# `data`, `X`, `y`, `classes`, `X_train`, `y_train` used later are undefined —
# verify against the original script.
_A: List[Any] = datasets.load_iris()
_A: Union[str, Any] = np.array(data["""data"""])
_A: Union[str, Any] = np.array(data["""target"""])
_A: Dict = data["""target_names"""]
_A , _A , _A , _A: List[str] = train_test_split(X, y)
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Optional[int]:
return np.linalg.norm(np.array(_lowerCAmelCase ) - np.array(_lowerCAmelCase ) )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=5 )-> int:
__UpperCAmelCase = zip(_lowerCAmelCase , _lowerCAmelCase )
# List of distances of all points from the point to be classified
__UpperCAmelCase = []
for data_point in data:
__UpperCAmelCase = euclidean_distance(data_point[0] , _lowerCAmelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase = [i[1] for i in sorted(_lowerCAmelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase = Counter(_lowerCAmelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    # NOTE(review): `classifier`, `X_train`, `y_train` and `classes` are not
    # defined above — every name in this snippet was rebound to `_A`; the call
    # looks mangled. Verify against the original script.
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Number of bits used per colour channel (2**8 = 256 intensity levels).
# NOTE(review): the defaults below reference `BITS`, which is presumably this
# constant before name-mangling — verify.
_a = 8
def lowerCamelCase__ ( x, bits=8 ):
    """Convert an image tensor with values in [0, 1] into a signed bit tensor.

    Args:
        x: float tensor of shape (b, c, h, w), values in [0, 1].
        bits: bits per channel (default 8; the original default `BITS` was an
            undefined mangled name).

    Returns:
        Tensor of shape (b, c * bits, h, w) with entries in {-1, 1}, most
        significant bit first. (einops rearranges replaced by equivalent
        view/reshape calls.)
    """
    device = x.device
    # Quantise to integer intensities 0..255.
    x = (x * 255).int().clamp(0, 255)
    # Power-of-two mask per bit position, most significant first.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = mask.view(-1, 1, 1)          # 'd -> d 1 1'
    x = x.unsqueeze(2)                  # 'b c h w -> b c 1 h w'
    # Extract each bit, then fold the bit axis into the channel axis.
    out = ((x & mask) != 0).float()
    b, c, d, h, w = out.shape
    out = out.reshape(b, c * d, h, w)   # 'b c d h w -> b (c d) h w'
    # Map {0, 1} -> {-1, 1}.
    out = out * 2 - 1
    return out
def lowerCamelCase__ ( x, bits=8 ):
    """Inverse of decimal_to_bits: fold sign bits back into [0, 1] intensities.

    Args:
        x: tensor of shape (b, c * bits, h, w); positive entries count as 1.
        bits: bits per channel (default 8). The original hard-coded ``d=8`` in
            its rearrange; here the `bits` argument is honoured.

    Returns:
        Float tensor of shape (b, c, h, w) with values clamped to [0, 1].
    """
    device = x.device
    # Positive entries count as bit 1, everything else as 0.
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = mask.view(-1, 1, 1)                    # 'd -> d 1 1'
    b, cd, h, w = x.shape
    x = x.reshape(b, cd // bits, bits, h, w)      # 'b (c d) h w -> b c d h w'
    # Weighted sum over the bit axis reconstructs the integer intensity.
    dec = (x * mask).sum(dim=2)
    return (dec / 255).clamp(0.0, 1.0)
def lowerCamelCase__ ( self, model_output, timestep, sample, eta = 0.0, use_clipped_model_output = True, generator=None, return_dict = True, ):
    """DDIM denoising step adapted for bit diffusion: x_0 is clipped to
    +/- ``self.bit_scale`` instead of +/- 1.

    Args:
        self: a DDIM-style scheduler (provides alphas_cumprod, config, etc.).
        model_output: predicted noise e_theta(x_t, t).
        timestep: current discrete timestep t.
        sample: current sample x_t.
        eta: weight of the stochastic noise term (0.0 = deterministic DDIM).
        use_clipped_model_output: re-derive the noise from the clipped x_0.
        generator: optional RNG for the added noise.
        return_dict: return DDIMSchedulerOutput instead of a tuple.

    NOTE(review): parameter names restored from the mangled duplicate
    `__snake_case` list — the body's right-hand sides already used these names.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0" to the bit scale rather than to 1.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else 'cpu'
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def lowerCamelCase__ ( self, model_output, timestep, sample, prediction_type="epsilon", generator=None, return_dict = True, ):
    """DDPM denoising step adapted for bit diffusion: x_0 is clipped to
    +/- ``self.bit_scale`` instead of +/- 1.

    Args:
        self: a DDPM-style scheduler (alphas, betas, alphas_cumprod, ...).
        model_output: predicted noise (or sample, per ``prediction_type``).
        timestep: current discrete timestep t.
        sample: current sample x_t.
        prediction_type: "epsilon" (noise) or "sample".
        generator: optional RNG for the added noise.
        return_dict: return DDPMSchedulerOutput instead of a tuple.

    Raises:
        ValueError: for an unsupported ``prediction_type``.

    NOTE(review): parameter names restored from the mangled duplicate
    `__snake_case` list — the body's right-hand sides already used these names.
    """
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
    # 3. Clip "predicted x_0" to the bit scale rather than to 1.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise (only for t > 0)
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class _UpperCAmelCase( lowerCamelCase ):
    # Bit-diffusion image-generation pipeline: runs the diffusion loop in bit
    # space and converts the final bits back to a decimal image.
    # NOTE(review): the base class `lowerCamelCase`, the `*_bit_scheduler_step`
    # helpers and `decimal_to_bits`/`bits_to_decimal` referenced below appear
    # to be name-mangled relative to the definitions above — verify wiring.
    def __init__( self , __a , __a , __a = 1.0 , ) -> str:
        '''Store the bit scale, monkey-patch the scheduler's step function
        (DDIM vs DDPM variant), and register unet/scheduler as modules.
        NOTE(review): the duplicated `__a` parameter names and the
        `_UpperCamelCase` assignment targets are mangling artifacts and are not
        valid/meaningful Python — verify against the original source.'''
        super().__init__()
        _UpperCamelCase = bit_scale
        _UpperCamelCase = (
            ddim_bit_scheduler_step if isinstance(__a , __a) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=__a , scheduler=__a)
    @torch.no_grad()
    def __call__( self , __a = 2_56 , __a = 2_56 , __a = 50 , __a = None , __a = 1 , __a = "pil" , __a = True , **__a , ) -> Union[Tuple, ImagePipelineOutput]:
        '''Sample images: start from random bit-space latents, denoise for the
        scheduled timesteps, then decode bits back to a [0, 1] image.'''
        # Random starting image, then encoded into scaled bit space.
        _UpperCamelCase = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=__a , )
        _UpperCamelCase = decimal_to_bits(__a) * self.bit_scale
        _UpperCamelCase = latents.to(self.device)
        self.scheduler.set_timesteps(__a)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            _UpperCamelCase = self.unet(__a , __a).sample
            # compute the previous noisy sample x_t -> x_t-1
            _UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample
        # Decode the final bits back into a [0, 1] image.
        _UpperCamelCase = bits_to_decimal(__a)
        if output_type == "pil":
            _UpperCamelCase = self.numpy_to_pil(__a)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__a)
| 78 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
# Module-level logger shared by the patching utilities below.
_a = get_logger(__name__)
class _UpperCAmelCase:
    # Wrapper object that mirrors a module's public attributes so individual
    # attributes can be patched without touching the real module.
    # NOTE(review): `_PatchedModuleObj` referenced below is presumably this
    # class's original name before mangling; `module`/`attrs` parameters and
    # the `_UpperCamelCase` targets are likewise mangled — verify.
    def __init__( self , __a , __a=None) -> Dict:
        '''Copy the module's non-dunder attributes (or those listed in `attrs`)
        onto this wrapper, and remember the original, unwrapped module.'''
        _UpperCamelCase = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__'''):
                    setattr(self , __a , getattr(__a , __a))
        # Unwrap nested patched objects so _original_module is the real module.
        _UpperCamelCase = module._original_module if isinstance(__a , _PatchedModuleObj) else module
class _UpperCAmelCase:
    # Context manager that patches an attribute reachable from `obj`'s globals
    # (e.g. "os.path.join") with a replacement, restoring it on exit.
    # NOTE(review): duplicated `__a` parameter names and `_UpperCamelCase`
    # assignment targets are mangling artifacts (the intended names — obj,
    # target, new, key, original, attrs, submodules, target_attr, submodule,
    # obj_attr, attr_value — still appear on right-hand sides); verify against
    # the original patcher implementation.
    # Registry of patches started via the start/stop helpers below.
    lowercase__ = []
    def __init__( self , __a , __a , __a , __a=None) -> List[str]:
        '''Record the object whose view of `target` (dotted path) should be
        patched with `new`; `attrs` optionally limits copied attributes.'''
        _UpperCamelCase = obj
        _UpperCamelCase = target
        _UpperCamelCase = new
        _UpperCamelCase = target.split('''.''')[0]
        _UpperCamelCase = {}
        _UpperCamelCase = attrs or []
    def __enter__( self) -> int:
        '''Apply the patch: wrap every submodule level the object can see, then
        replace the target attribute itself.'''
        *_UpperCamelCase , _UpperCamelCase = self.target.split('''.''')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(__a)):
            try:
                _UpperCamelCase = import_module('''.'''.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                _UpperCamelCase = getattr(self.obj , __a)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(__a , _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    _UpperCamelCase = obj_attr
                    # patch at top level
                    setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs))
                    _UpperCamelCase = getattr(self.obj , __a)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a) , attrs=self.attrs))
                        _UpperCamelCase = getattr(__a , __a)
                    # finally set the target attribute
                    setattr(__a , __a , self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                _UpperCamelCase = getattr(import_module('''.'''.join(__a)) , __a)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , __a) is attr_value:
                    _UpperCamelCase = getattr(self.obj , __a)
                    setattr(self.obj , __a , self.new)
        elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
            _UpperCamelCase = globals()['''__builtins__'''][target_attr]
            setattr(self.obj , __a , self.new)
        else:
            raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''')
    def __exit__( self , *__a) -> Tuple:
        '''Restore every attribute recorded in `self.original`.'''
        for attr in list(self.original):
            setattr(self.obj , __a , self.original.pop(__a))
    def UpperCAmelCase ( self) -> Dict:
        '''Activate the patch outside a `with` block and register it.'''
        self.__enter__()
        self._active_patches.append(self)
    def UpperCAmelCase ( self) -> str:
        '''Deactivate a previously started patch; no-op if never started.'''
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 78 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ():
    """Run the TensorFlow benchmark from command-line dataclass arguments,
    translating deprecated `--no_*` flags into an actionable error message.

    NOTE(review): `_lowercase` below is an undefined name (mangling artifact);
    the original presumably parsed `TensorFlowBenchmarkArguments` and bound
    the intermediate results to `parser`, `benchmark`, etc. — verify.
    """
    a__ = HfArgumentParser(_lowercase )
    a__ = parser.parse_args_into_dataclasses()[0]
    a__ = TensorFlowBenchmark(args=_lowercase )
    try:
        a__ = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        a__ = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        a__ = " ".join(str(_lowercase ).split(" " )[:-1] )
        a__ = ""
        # NOTE(review): eval() on text derived from an exception message is
        # fragile and unsafe if the message can contain attacker-controlled
        # content — confirm the intent before keeping it.
        a__ = eval(str(_lowercase ).split(" " )[-1] )
        a__ = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(_lowercase )
        if len(_lowercase ) > 0:
            a__ = full_error_msg + begin_error_msg + str(_lowercase )
            raise ValueError(_lowercase )
    benchmark.run()
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this snippet — the entry point
    # above is named `_lowerCAmelCase`; the call looks mangled. Verify.
    main()
| 331 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCAmelCase (_lowercase , _lowercase=0.999 , _lowercase="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowercase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowercase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
a__ = []
for i in range(_lowercase ):
a__ = i / num_diffusion_timesteps
a__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowercase ) / alpha_bar_fn(_lowercase ) , _lowercase ) )
return torch.tensor(_lowercase , dtype=torch.floataa )
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase ):
    """KDPM2-style discrete scheduler (DPM-Solver-2 / Karras et al.) with
    interpolated sigmas for the second-order step.

    NOTE(review): this class is heavily name-mangled: every method is named
    `lowerCAmelCase_` (so later defs shadow earlier ones), parameters are the
    duplicated name `a__` (not valid Python), and `torch.floataa`/`np.floataa`
    stand in for float32/float64. The right-hand sides still use the original
    local names (num_inference_steps, sigmas, step_index, ...); code is kept
    byte-identical here — verify against the upstream scheduler.
    """
    # Compatible scheduler names and solver order.
    UpperCamelCase__ = [e.name for e in KarrasDiffusionSchedulers]
    UpperCamelCase__ = 2
    @register_to_config
    def __init__( self : Optional[int] ,a__ : int = 10_00 ,a__ : float = 0.0_0085 ,a__ : float = 0.012 ,a__ : str = "linear" ,a__ : Optional[Union[np.ndarray, List[float]]] = None ,a__ : str = "epsilon" ,a__ : str = "linspace" ,a__ : int = 0 ,):
        # Build the beta schedule, derive cumulative alphas, and initialise
        # the timestep/sigma tables.
        if trained_betas is not None:
            a__ = torch.tensor(a__ ,dtype=torch.floataa )
        elif beta_schedule == "linear":
            a__ = torch.linspace(a__ ,a__ ,a__ ,dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            a__ = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,a__ ,dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            a__ = betas_for_alpha_bar(a__ )
        else:
            raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )
        a__ = 1.0 - self.betas
        a__ = torch.cumprod(self.alphas ,dim=0 )
        # set all values
        self.set_timesteps(a__ ,a__ ,a__ )
    def lowerCAmelCase_ ( self : str ,a__ : str ,a__ : List[Any]=None ):
        # Map a timestep value to its index in the (possibly repeated)
        # timestep schedule, tracking how often each timestep was visited.
        if schedule_timesteps is None:
            a__ = self.timesteps
        a__ = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            a__ = 1 if len(a__ ) > 1 else 0
        else:
            a__ = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
            a__ = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def lowerCAmelCase_ ( self : List[Any] ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def lowerCAmelCase_ ( self : Union[str, Any] ,a__ : torch.FloatTensor ,a__ : Union[float, torch.FloatTensor] ,):
        # Scale the model input by 1/sqrt(sigma^2 + 1), choosing the sigma for
        # the current solver order (first vs second stage).
        a__ = self.index_for_timestep(a__ )
        if self.state_in_first_order:
            a__ = self.sigmas[step_index]
        else:
            a__ = self.sigmas_interpol[step_index]
        a__ = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def lowerCAmelCase_ ( self : int ,a__ : int ,a__ : Union[str, torch.device] = None ,a__ : Optional[int] = None ,):
        # Build the inference timestep schedule, the sigma table, and the
        # interpolated (log-space midpoint) sigmas used by the 2nd-order step.
        a__ = num_inference_steps
        a__ = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            a__ = np.linspace(0 ,num_train_timesteps - 1 ,a__ ,dtype=a__ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            a__ = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            a__ = (np.arange(0 ,a__ ) * step_ratio).round()[::-1].copy().astype(a__ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            a__ = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            a__ = (np.arange(a__ ,0 ,-step_ratio )).round().copy().astype(a__ )
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
        a__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        a__ = torch.from_numpy(np.log(a__ ) ).to(a__ )
        a__ = np.interp(a__ ,np.arange(0 ,len(a__ ) ) ,a__ )
        a__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        a__ = torch.from_numpy(a__ ).to(device=a__ )
        # interpolate sigmas
        a__ = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
        a__ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        a__ = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(a__ ).startswith("mps" ):
            # mps does not support float64
            a__ = torch.from_numpy(a__ ).to(a__ ,dtype=torch.floataa )
        else:
            a__ = torch.from_numpy(a__ ).to(a__ )
        # interpolate timesteps
        a__ = self.sigma_to_t(a__ ).to(a__ ,dtype=timesteps.dtype )
        a__ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
        a__ = torch.cat([timesteps[:1], interleaved_timesteps] )
        a__ = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        a__ = defaultdict(a__ )
    def lowerCAmelCase_ ( self : Dict ,a__ : Any ):
        # Invert the sigma table: find the continuous timestep t whose sigma
        # matches, by linear interpolation in log-sigma space.
        # get log sigma
        a__ = sigma.log()
        # get distribution
        a__ = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        a__ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        a__ = low_idx + 1
        a__ = self.log_sigmas[low_idx]
        a__ = self.log_sigmas[high_idx]
        # interpolate sigmas
        a__ = (low - log_sigma) / (low - high)
        a__ = w.clamp(0 ,1 )
        # transform interpolation to time range
        a__ = (1 - w) * low_idx + w * high_idx
        a__ = t.view(sigma.shape )
        return t
    @property
    def lowerCAmelCase_ ( self : Tuple ):
        # True while the first-order stage of the two-stage solver is pending.
        return self.sample is None
    def lowerCAmelCase_ ( self : Any ,a__ : Union[torch.FloatTensor, np.ndarray] ,a__ : Union[float, torch.FloatTensor] ,a__ : Union[torch.FloatTensor, np.ndarray] ,a__ : bool = True ,):
        # One solver step: first-order (Euler to the interpolated sigma) or the
        # second-order DPM-Solver-2 correction, depending on internal state.
        a__ = self.index_for_timestep(a__ )
        # advance index counter by 1
        a__ = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            a__ = self.sigmas[step_index]
            a__ = self.sigmas_interpol[step_index + 1]
            a__ = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            a__ = self.sigmas[step_index - 1]
            a__ = self.sigmas_interpol[step_index]
            a__ = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        a__ = 0
        a__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            a__ = sigma_hat if self.state_in_first_order else sigma_interpol
            a__ = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            a__ = sigma_hat if self.state_in_first_order else sigma_interpol
            a__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            a__ = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            a__ = sigma_interpol - sigma_hat
            # store for 2nd order step
            a__ = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            a__ = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            a__ = sigma_next - sigma_hat
            a__ = self.sample
            a__ = None
        a__ = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=a__ )
    def lowerCAmelCase_ ( self : str ,a__ : torch.FloatTensor ,a__ : torch.FloatTensor ,a__ : torch.FloatTensor ,):
        # Forward-diffuse clean samples to the noise level of the given
        # timesteps: noisy = original + noise * sigma(t).
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        a__ = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(a__ ):
            # mps does not support float64
            a__ = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
            a__ = timesteps.to(original_samples.device ,dtype=torch.floataa )
        else:
            a__ = self.timesteps.to(original_samples.device )
            a__ = timesteps.to(original_samples.device )
        a__ = [self.index_for_timestep(a__ ,a__ ) for t in timesteps]
        a__ = sigmas[step_indices].flatten()
        # Broadcast sigma over the sample's trailing dimensions.
        while len(sigma.shape ) < len(original_samples.shape ):
            a__ = sigma.unsqueeze(-1 )
        a__ = original_samples + noise * sigma
        return noisy_samples
    def __len__( self : str ):
        # Length of the training schedule.
        return self.config.num_train_timesteps
| 331 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__:
    '''Synthetic linear-regression dataset: y = a * x + b + N(0, 0.1) noise.

    NOTE(review): the original `__init__` parameter names were mangled to
    duplicated `__snake_case`; restored as (a, b, length, seed) based on how
    the values are used in the body — verify against the upstream test helper.
    '''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        '''Draw `length` x values from N(0, 1) and build noisy targets.'''
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        # Noise term only is cast; the product a * x is already float32.
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__( self ):
        '''Number of samples in the dataset.'''
        return self.length
    def __getitem__( self , i ):
        '''Return the i-th sample as a {"x": ..., "y": ...} mapping.'''
        return {"x": self.x[i], "y": self.y[i]}
class lowerCAmelCase__( torch.nn.Module ):
    '''Tiny regression model whose parameters are fixed 2-element vectors;
    forward computes ``x * a[0] + b[0]`` (i.e. x * 2 + 2 initially).

    NOTE(review): the mangled `__init__` accepted (and ignored) extra
    arguments; restored as (a, b, double_output) for signature compatibility
    with the sibling model below — verify against the upstream test helper.
    '''
    def __init__( self , a=0 , b=0 , double_output=False ):
        '''Initialise the fixed [2, 3] parameter vectors; `a`/`b` are accepted
        for interface parity but unused here.'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        # Used to print dtype diagnostics exactly once.
        self.first_batch = True
    def forward( self , x=None ):
        '''Affine map using only the first component of each parameter.'''
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class lowerCAmelCase__( torch.nn.Module ):
    '''Scalar regression model: forward computes ``x * a + b`` with learnable
    scalar parameters initialised from the constructor arguments.

    NOTE(review): the mangled `__init__` had duplicated parameter names;
    restored as (a, b, double_output) based on how the values are used —
    verify against the upstream test helper.
    '''
    def __init__( self , a=0 , b=0 , double_output=False ):
        '''Wrap `a` and `b` as float scalar parameters.'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        # Used to print dtype diagnostics exactly once.
        self.first_batch = True
    def forward( self , x=None ):
        '''Affine map x * a + b.'''
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a + self.b
def snake_case_ ( accelerator , batch_size = 16 ):
    """Build MRPC train/eval dataloaders tokenised with bert-base-cased.

    Args:
        accelerator: Accelerate accelerator; only ``distributed_type`` is read
            (TPU forces fixed-length padding in the collate function).
        batch_size: kept for interface compatibility. NOTE(review): the
            original hard-codes train/eval batch sizes to 2 and 1 below and
            never reads this argument — verify intent.

    Returns:
        (train_dataloader, eval_dataloader).
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase__:
    '''Differentiable projective camera: a per-batch origin plus orthonormal
    axes (x, y, z), an image resolution and a field of view, with helpers that
    build one (origin, direction) ray per pixel.

    NOTE(review): all nine dataclass fields are mangled to the same name `A_`
    (originally origin/x/y/z/width/height/x_fov/y_fov/shape) and every method
    to `_lowerCamelCase` (originally __post_init__/resolution/fov/
    get_image_coords/camera_rays/get_camera_rays/resize_image), so later
    definitions shadow earlier ones; the `UpperCAmelCase_` assignment targets
    are likewise mangled while right-hand sides keep the real local names.
    Code kept byte-identical — verify against the original source.
    '''
    A_ : torch.Tensor # [batch_size x 3]
    A_ : torch.Tensor # [batch_size x 3]
    A_ : torch.Tensor # [batch_size x 3]
    A_ : torch.Tensor # [batch_size x 3]
    A_ : int
    A_ : int
    A_ : float
    A_ : float
    A_ : Tuple[int]
    def _lowerCamelCase ( self : List[Any] ):
        '''Sanity-check that origin and the three axes share a batch size and
        are all [batch_size x 3] 2-D tensors.'''
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def _lowerCamelCase ( self : Tuple ):
        '''Return (width, height) as a float tensor.'''
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
    def _lowerCamelCase ( self : Tuple ):
        '''Return (x_fov, y_fov) as a float tensor.'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
    def _lowerCamelCase ( self : Any ):
        '''Enumerate integer pixel coordinates as (col, row) pairs in
        row-major order; result is [height * width, 2].'''
        UpperCAmelCase_ : Optional[int] = torch.arange(self.height * self.width )
        UpperCAmelCase_ : Any = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(__snake_case , self.width , rounding_mode='''trunc''' ),
            ] , axis=1 , )
        return coords
    @property
    def _lowerCamelCase ( self : Optional[int] ):
        '''Build one camera ray per pixel for every batch element; the result
        is [batch, height * width, 2, 3] (origin and direction per ray).'''
        UpperCAmelCase_ , *UpperCAmelCase_ : List[Any] = self.shape
        UpperCAmelCase_ : Optional[Any] = int(np.prod(__snake_case ) )
        UpperCAmelCase_ : str = self.get_image_coords()
        UpperCAmelCase_ : List[str] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        UpperCAmelCase_ : Tuple = self.get_camera_rays(__snake_case )
        UpperCAmelCase_ : Union[str, Any] = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def _lowerCamelCase ( self : Dict , __snake_case : torch.Tensor ):
        '''Map pixel coordinates to world-space rays: normalise to [-1, 1],
        scale by tan(fov / 2), combine the camera axes, then normalise the
        direction vectors and pair them with the (broadcast) origin.'''
        UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : int = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        UpperCAmelCase_ : str = coords.view(__snake_case , -1 , 2 )
        UpperCAmelCase_ : List[Any] = self.resolution()
        UpperCAmelCase_ : Optional[Any] = self.fov()
        # Fractional position in [-1, 1] across the image plane.
        UpperCAmelCase_ : int = (flat.float() / (res - 1)) * 2 - 1
        UpperCAmelCase_ : Optional[Any] = fracs * torch.tan(fov / 2 )
        UpperCAmelCase_ : Optional[Any] = fracs.view(__snake_case , -1 , 2 )
        UpperCAmelCase_ : List[Any] = (
            self.z.view(__snake_case , 1 , 3 )
            + self.x.view(__snake_case , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(__snake_case , 1 , 3 ) * fracs[:, :, 1:]
        )
        UpperCAmelCase_ : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=__snake_case )
        UpperCAmelCase_ : Optional[int] = torch.stack(
            [
                torch.broadcast_to(self.origin.view(__snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(__snake_case , *__snake_case , 2 , 3 )
    def _lowerCamelCase ( self : Any , __snake_case : int , __snake_case : int ):
        '''Return a copy of this camera at a new width/height; the aspect
        ratio must be preserved.
        NOTE(review): the duplicated `__snake_case` parameter names are a
        mangling artifact (originally width, height) — verify.'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(__lowercase ), np.cos(__lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : str = -z * 4
UpperCAmelCase_ : List[Any] = np.array([np.cos(__lowercase ), -np.sin(__lowercase ), 0.0] )
UpperCAmelCase_ : Tuple = np.cross(__lowercase , __lowercase )
origins.append(__lowercase )
xs.append(__lowercase )
ys.append(__lowercase )
zs.append(__lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowercase , axis=0 ) ).float() , width=__lowercase , height=__lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowercase )) , ) | 641 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Tuple = BartphoTokenizer
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
super().setUp()
_lowerCAmelCase :int = ['▁This', '▁is', '▁a', '▁t', 'est']
_lowerCAmelCase :str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :List[str] = {'unk_token': '<unk>'}
_lowerCAmelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
_lowerCAmelCase :Optional[int] = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , **_UpperCAmelCase: Dict ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] ):
_lowerCAmelCase :int = 'This is a là test'
_lowerCAmelCase :Optional[int] = 'This is a<unk><unk> test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Any = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
_lowerCAmelCase :Dict = 'This is a là test'
_lowerCAmelCase :Dict = '▁This ▁is ▁a ▁l à ▁t est'.split()
_lowerCAmelCase :int = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase :int = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) | 687 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
"""simple docstring"""
_lowerCAmelCase :Optional[int] = list(__magic_name__ )
_lowerCAmelCase :Dict = list(__magic_name__ )
_lowerCAmelCase :Any = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count += 1
_lowerCAmelCase :Union[str, Any] = '_'
if count > 1:
return False
else:
return "".join(__magic_name__ )
def UpperCamelCase_( __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :int = []
while True:
_lowerCAmelCase :str = ['$'] * len(__magic_name__ )
_lowerCAmelCase :Optional[int] = []
for i in range(len(__magic_name__ ) ):
for j in range(i + 1 , len(__magic_name__ ) ):
_lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCAmelCase :str = '*'
_lowerCAmelCase :Union[str, Any] = '*'
temp.append('X' )
for i in range(len(__magic_name__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__magic_name__ ) == 0:
return pi
_lowerCAmelCase :Any = list(set(__magic_name__ ) )
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
"""simple docstring"""
_lowerCAmelCase :str = []
for minterm in minterms:
_lowerCAmelCase :Any = ''
for _ in range(__magic_name__ ):
_lowerCAmelCase :Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(__magic_name__ )
return temp
def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = list(__magic_name__ )
_lowerCAmelCase :List[Any] = list(__magic_name__ )
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = []
_lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
for i in range(len(chart[0] ) ):
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Optional[Any] = -1
for j in range(len(__magic_name__ ) ):
if chart[j][i] == 1:
count += 1
_lowerCAmelCase :List[Any] = j
if count == 1:
_lowerCAmelCase :Dict = 1
for i in range(len(__magic_name__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__magic_name__ ) ):
_lowerCAmelCase :Dict = 0
temp.append(prime_implicants[i] )
while True:
_lowerCAmelCase :Dict = 0
_lowerCAmelCase :Any = -1
_lowerCAmelCase :Optional[Any] = 0
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = chart[i].count(1 )
if count_n > max_n:
_lowerCAmelCase :Optional[Any] = count_n
_lowerCAmelCase :Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__magic_name__ ) ):
_lowerCAmelCase :str = 0
def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
"""simple docstring"""
_lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
for i in range(len(__magic_name__ ) ):
_lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
for j in range(len(__magic_name__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
_lowerCAmelCase :str = 1
return chart
def UpperCamelCase_( ):
"""simple docstring"""
_lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
_lowerCAmelCase :Tuple = [
float(__magic_name__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Any = check(__magic_name__ )
print('Prime Implicants are:' )
print(__magic_name__ )
_lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
print('Essential Prime Implicants are:' )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 687 | 1 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__lowerCamelCase : Optional[int] =1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__lowerCamelCase : Tuple =0
while number > 0:
__lowerCamelCase : List[Any] =number % 10
sum_of_digits += last_digit
__lowerCamelCase : List[str] =number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int = 100 ):
'''simple docstring'''
__lowerCamelCase : int =factorial(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict =split_and_add(SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__snake_case : Dict = CycleDiffusionPipeline
__snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__snake_case : Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
__snake_case : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
__snake_case : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__snake_case : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self :Dict ):
torch.manual_seed(0 )
__lowerCamelCase : List[str] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__lowerCamelCase : List[Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Optional[Any] =CLIPTextModel(__lowercase )
__lowerCamelCase : Tuple =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase : Optional[int] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowercase ( self :Union[str, Any] , __lowercase :Optional[int] , __lowercase :str=0 ):
__lowerCamelCase : List[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCamelCase : str =image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCamelCase : Union[str, Any] =torch.manual_seed(__lowercase )
else:
__lowerCamelCase : Any =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCamelCase : Dict ={
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : int ='''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Tuple =self.get_dummy_components()
__lowerCamelCase : List[str] =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : Tuple =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : List[Any] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : int =pipe(**__lowercase )
__lowerCamelCase : Dict =output.images
__lowerCamelCase : Union[str, Any] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[Any] =np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self :str ):
__lowerCamelCase : str =self.get_dummy_components()
for name, module in components.items():
if hasattr(__lowercase , '''half''' ):
__lowerCamelCase : Union[str, Any] =module.half()
__lowerCamelCase : int =CycleDiffusionPipeline(**__lowercase )
__lowerCamelCase : List[str] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCamelCase : Optional[int] =self.get_dummy_inputs(__lowercase )
__lowerCamelCase : Dict =pipe(**__lowercase )
__lowerCamelCase : List[str] =output.images
__lowerCamelCase : Dict =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__lowerCamelCase : str =np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def __lowercase ( self :Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def __lowercase ( self :Optional[Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self :str ):
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self :Dict ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self :Dict ):
__lowerCamelCase : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__lowerCamelCase : Any =init_image.resize((512, 512) )
__lowerCamelCase : Optional[Any] ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : Optional[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : Optional[Any] =CycleDiffusionPipeline.from_pretrained(
__lowercase , scheduler=__lowercase , safety_checker=__lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Dict ='''A black colored car'''
__lowerCamelCase : Union[str, Any] ='''A blue colored car'''
__lowerCamelCase : Dict =torch.manual_seed(0 )
__lowerCamelCase : Tuple =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Tuple =output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__lowerCamelCase : List[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__lowerCamelCase : Optional[Any] =init_image.resize((512, 512) )
__lowerCamelCase : Any ='''CompVis/stable-diffusion-v1-4'''
__lowerCamelCase : List[Any] =DDIMScheduler.from_pretrained(__lowercase , subfolder='''scheduler''' )
__lowerCamelCase : str =CycleDiffusionPipeline.from_pretrained(__lowercase , scheduler=__lowercase , safety_checker=__lowercase )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__lowerCamelCase : Any ='''A black colored car'''
__lowerCamelCase : int ='''A blue colored car'''
__lowerCamelCase : Tuple =torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] =pipe(
prompt=__lowercase , source_prompt=__lowercase , image=__lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowercase , output_type='''np''' , )
__lowerCamelCase : Dict =output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 363 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A_ ( nn.Module ):
    """Tiny 3 -> 4 -> 5 test network (Linear, BatchNorm, Linear) used by the hook tests.

    NOTE(review): automated renaming has broken this class as written:
    - each layer is bound to the throwaway local ``_lowercase`` instead of an
      attribute on ``self``, so the ``self.lineara`` / ``self.batchnorm``
      lookups in the forward pass raise AttributeError;
    - ``nn.BatchNormad`` does not exist — presumably a mangling of
      ``nn.BatchNorm1d`` (confirm against the upstream accelerate test suite);
    - the two distinct linear layers were collapsed onto the single name
      ``lineara``, so they cannot both be restored without renaming attributes
      that the rest of this file references.
    """

    def __init__( self : Tuple ) -> List[str]:
        super().__init__()
        # NOTE(review): these should be ``self.<name> = ...`` assignments.
        _lowercase = nn.Linear(3 ,4 )
        _lowercase = nn.BatchNormad(4 )
        _lowercase = nn.Linear(4 ,5 )

    def __UpperCAmelCase ( self : int ,__A : Tuple ) -> int:
        # Intended flow: linear(3->4) -> batchnorm(4) -> linear(4->5).
        return self.lineara(self.batchnorm(self.lineara(__A ) ) )
class A_ ( UpperCAmelCase ):
    """ModelHook whose pre-forward step increments the first positional input.

    Used by the tests to verify that a hook attached with
    ``add_hook_to_module`` can rewrite a module's forward arguments before the
    real forward runs, and that hooks chain additively inside a
    ``SequentialHook``.
    """

    def __UpperCAmelCase ( self ,__A ,*args ,**kwargs ):
        # Fix: the mangled signature reused ``__A`` for the module, *args and
        # **kwargs — duplicate argument names are a SyntaxError in Python.  The
        # body already read ``args``/``kwargs``, so those names are restored
        # (the unused typing annotations referenced names never imported in
        # this file and were dropped).
        # ``__A`` is the hooked module; bump the first input by 1 and pass the
        # remaining args and all kwargs through untouched.
        return (args[0] + 1,) + args[1:], kwargs
class A_ ( UpperCAmelCase ):
    """ModelHook whose post-forward step adds 1 to the module's output.

    Used by the tests to verify that a hook attached with
    ``add_hook_to_module`` can rewrite a module's output after forward runs.
    """

    def __UpperCAmelCase ( self ,__A ,output ):
        # Fix: the mangled signature reused ``__A`` for both the module and the
        # output — duplicate argument names are a SyntaxError in Python — while
        # the body read ``output``; the second parameter is restored to
        # ``output`` (the unused typing annotations referenced names never
        # imported in this file and were dropped).
        # ``__A`` is the hooked module.
        return output + 1
class A_ ( unittest.TestCase ):
    """Unit tests for accelerate's module hooks: attach/detach round-trips,
    pre/post forward rewriting, and CPU/GPU/meta device alignment with
    optional weight offload.

    NOTE(review): automated renaming collapsed distinct locals into
    ``_lowercase`` and left references such as ``__A`` / ``test_model`` /
    ``model`` / ``x`` / ``output`` unbound, so most method bodies raise
    NameError as written — restore distinct local names from the upstream
    accelerate test suite to make them runnable.  The doc comments below
    describe the intended behavior of each test.
    """

    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        """add/remove hook round-trip must leave forward's name and signature intact."""
        _lowercase = ModelForTest()
        _lowercase = ModelHook()
        add_hook_to_module(__A ,__A )
        self.assertEqual(test_model._hf_hook ,__A )
        self.assertTrue(hasattr(__A ,'_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ ,'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
        remove_hook_from_module(__A )
        self.assertFalse(hasattr(__A ,'_hf_hook' ) )
        self.assertFalse(hasattr(__A ,'_old_forward' ) )

    def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
        """append=True stacks a second hook into a SequentialHook instead of replacing."""
        _lowercase = ModelForTest()
        _lowercase = ModelHook()
        add_hook_to_module(__A ,__A )
        add_hook_to_module(__A ,__A ,append=__A )
        self.assertEqual(isinstance(test_model._hf_hook ,__A ) ,__A )
        self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
        self.assertTrue(hasattr(__A ,'_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ ,'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
        remove_hook_from_module(__A )
        self.assertFalse(hasattr(__A ,'_hf_hook' ) )
        self.assertFalse(hasattr(__A ,'_old_forward' ) )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        """A PreForwardHook shifts inputs by +1; re-adding replaces; SequentialHook chains to +2."""
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(x + 1 )
        _lowercase = test_model(x + 2 )
        _lowercase = PreForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,__A ,atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        _lowercase = PreForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,__A ,atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        _lowercase = SequentialHook(PreForwardHook() ,PreForwardHook() )
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        assert torch.allclose(__A ,__A ,atol=1e-5 )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """A PostForwardHook shifts outputs by +1; re-adding replaces; SequentialHook chains to +2."""
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(__A )
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ,atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ,atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        _lowercase = SequentialHook(PostForwardHook() ,PostForwardHook() )
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        assert torch.allclose(__A ,output + 2 ,atol=1e-5 )

    def __UpperCAmelCase ( self : Any ) -> str:
        """The flag assigned below (presumably the hook's ``no_grad`` — confirm upstream)
        controls whether the hooked forward's output requires grad."""
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(__A )
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        _lowercase = True
        _lowercase = test_model(__A )
        self.assertFalse(outputa.requires_grad )

    @require_multi_gpu
    def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
        """AlignDevicesHook moves each submodule to its execution_device; with
        io_same_device the output comes back on the input's device."""
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__A ,AlignDevicesHook(io_same_device=__A ) )
        _lowercase = torch.randn(2 ,3 ).to(0 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,torch.device(0 ) )

    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        """offload=True moves parameters to the meta device (buffers stay on the
        execution device unless offload_buffers=True); removing the hooks
        restores the real CPU weights."""
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(hook_kwargs['execution_device'] )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        _lowercase = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        """attach_align_device_hook offloads a whole model's parameters (and,
        in the second half, its buffers) to the meta device in one call."""
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(__A ,execution_device=__A ,offload=__A )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(__A )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__A ,execution_device=__A ,offload=__A ,offload_buffers=__A )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )

    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        """attach_align_device_hook with an explicit weights_map built from
        model.state_dict(): parameters (and optionally buffers) go to meta and
        reload from the supplied map."""
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            __A ,execution_device=__A ,offload=__A ,weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(__A )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __A ,execution_device=__A ,offload=__A ,weights_map=model.state_dict() ,offload_buffers=__A ,)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( lowerCAmelCase ):
    """Model tester for DeBERTa-v2: builds a tiny random config/inputs and checks
    the output shapes of every task head.

    NOTE(review): this class looks mangled by an automated rename — every
    parameter is named ``snake_case`` (duplicate parameter names are a
    SyntaxError), every helper method is named ``_SCREAMING_SNAKE_CASE`` (later
    defs overwrite earlier ones), and the base class ``lowerCAmelCase`` is not
    imported here. Confirm against the original file before relying on it.
    """
    def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=99 ,snake_case=32 ,snake_case=5 ,snake_case=4 ,snake_case=37 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=16 ,snake_case=2 ,snake_case=0.02 ,snake_case=False ,snake_case=True ,snake_case="None" ,snake_case=3 ,snake_case=4 ,snake_case=None ,):
        '''Store the hyper-parameters used to build tiny test models.'''
        # NOTE(review): the right-hand names below (parent, batch_size, ...) are
        # presumably the original parameter names — confirm.
        lowercase : List[Any] = parent
        lowercase : int = batch_size
        lowercase : Any = seq_length
        lowercase : Dict = is_training
        lowercase : int = use_input_mask
        lowercase : Optional[int] = use_token_type_ids
        lowercase : Optional[Any] = use_labels
        lowercase : Union[str, Any] = vocab_size
        lowercase : List[str] = hidden_size
        lowercase : str = num_hidden_layers
        lowercase : Union[str, Any] = num_attention_heads
        lowercase : List[str] = intermediate_size
        lowercase : Union[str, Any] = hidden_act
        lowercase : List[str] = hidden_dropout_prob
        lowercase : Tuple = attention_probs_dropout_prob
        lowercase : Optional[int] = max_position_embeddings
        lowercase : Tuple = type_vocab_size
        lowercase : List[str] = type_sequence_label_size
        lowercase : Union[str, Any] = initializer_range
        lowercase : str = num_labels
        lowercase : Tuple = num_choices
        lowercase : List[str] = relative_attention
        lowercase : List[Any] = position_biased_input
        lowercase : List[str] = pos_att_type
        lowercase : Tuple = scope
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Build random input ids, masks and labels plus a config.'''
        lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowercase : List[str] = None
        if self.use_input_mask:
            lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        lowercase : List[Any] = None
        if self.use_token_type_ids:
            lowercase : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        lowercase : Optional[int] = None
        lowercase : Dict = None
        lowercase : str = None
        if self.use_labels:
            lowercase : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            lowercase : int = ids_tensor([self.batch_size] ,self.num_choices )
        lowercase : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Return a tiny DebertaVaConfig built from the stored hyper-parameters.'''
        return DebertaVaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
        '''Assert the loss is a scalar (empty shape).'''
        self.parent.assertListEqual(list(result.loss.size() ) ,[] )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the base model's sequence output shape with/without masks.'''
        lowercase : Tuple = DebertaVaModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : str = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case )[0]
        lowercase : Tuple = model(snake_case ,token_type_ids=snake_case )[0]
        lowercase : Optional[Any] = model(snake_case )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the masked-LM head's logits shape.'''
        lowercase : Optional[int] = DebertaVaForMaskedLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : List[str] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the sequence-classification head's logits shape and scalar loss.'''
        lowercase : Any = self.num_labels
        lowercase : Dict = DebertaVaForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : Dict = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
        self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
        self.check_loss_output(snake_case )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the token-classification head's logits shape.'''
        lowercase : int = self.num_labels
        lowercase : Optional[int] = DebertaVaForTokenClassification(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : List[Any] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the question-answering head's start/end logits shapes.'''
        lowercase : Tuple = DebertaVaForQuestionAnswering(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : Tuple = model(
            snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,start_positions=snake_case ,end_positions=snake_case ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
        '''Check the multiple-choice head's logits shape (inputs tiled per choice).'''
        lowercase : Optional[Any] = DebertaVaForMultipleChoice(config=snake_case )
        model.to(snake_case )
        model.eval()
        lowercase : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        lowercase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        lowercase : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        lowercase : Optional[Any] = model(
            snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Split prepare_config_and_inputs() into (config, inputs_dict) for common tests.'''
        lowercase : Dict = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : List[str] = config_and_inputs
        lowercase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common model/pipeline test suite for DeBERTa-v2.

    NOTE(review): every class attribute below is named ``_a``, so each
    assignment overwrites the previous one — presumably these were
    ``all_model_classes``, ``pipeline_model_mapping`` and the usual boolean
    test flags; the base classes ``lowerCAmelCase`` are also unresolved here.
    Confirm against the original file.
    """
    # Model classes exercised by the common tests (torch only).
    _a : Dict= (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline tests.
    _a : str= (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Boolean feature flags for the common test harness (names lost to renaming).
    _a : Any= True
    _a : List[Any]= False
    _a : List[Any]= False
    _a : Any= False
    _a : Optional[Any]= False
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Create the model tester and config tester used by the tests below.'''
        lowercase : Any = DebertaVaModelTester(self )
        lowercase : List[Any] = ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the base model shape checks.'''
        lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the sequence-classification head checks.'''
        lowercase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the masked-LM head checks.'''
        lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the question-answering head checks.'''
        lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the token-classification head checks.'''
        lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*snake_case )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Exercise the multiple-choice head checks.'''
        lowercase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case )
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Smoke-test loading a pretrained checkpoint from the hub.'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : Optional[Any] = DebertaVaModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
    """Integration tests for DeBERTa-v2 against hard-coded reference outputs."""
    @unittest.skip(reason="""Model not available yet""" )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Placeholder for a head-level integration test (model not released yet).'''
        pass
    @slow
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Run microsoft/deberta-v2-xlarge on a fixed input and compare a 3x3
        output slice against recorded reference values (atol=1e-4).'''
        lowercase : Optional[Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
        lowercase : Any = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        lowercase : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowercase : str = model(snake_case ,attention_mask=snake_case )[0]
        # compare the actual values for a slice.
        lowercase : Tuple = torch.tensor(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case ,atol=1e-4 ) ,f"{output[:, 1:4, 1:4]}" )
| 336 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__UpperCAmelCase = """src/diffusers"""
# Matches is_xxx_available()
__UpperCAmelCase = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
__UpperCAmelCase = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
__UpperCAmelCase = """
{0} = None
"""
__UpperCAmelCase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
__UpperCAmelCase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def snake_case_ (__A : Union[str, Any] ) -> Optional[Any]:
    """Return the backend name(s) referenced by a line's ``is_xxx_available()``
    calls, joined with ``"_and_"``, or ``None`` when the line has none.

    Fix: the original discarded the ``findall`` result and applied ``len``/
    ``join`` to the raw input line instead of to the extracted backend names.
    """
    # _re_backend is a module-level compiled regex matching is_<name>_available().
    backends = _re_backend.findall(__A )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def snake_case_ () -> str:
    """Parse diffusers' top-level ``__init__.py`` and return a dict mapping each
    backend name to the list of objects gated behind its availability check.

    NOTE(review): this function takes no parameters yet reads ``__A`` (the
    init-file directory), and calls ``find_backend`` / ``_re_single_line_import``
    which are not defined under those names in this file — an automated rename
    appears to have broken it. Confirm against the original before running.
    """
    with open(os.path.join(__A , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        __lowerCAmelCase : str = f.readlines()
    # Get to the point we do the actual imports for type checking
    __lowerCAmelCase : Optional[Any] = 0
    __lowerCAmelCase : Optional[Any] = {}
    # Go through the end of the file
    while line_index < len(__A ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        __lowerCAmelCase : str = find_backend(lines[line_index] )
        if backend is not None:
            # Skip ahead to the `else:` branch that lists the gated objects.
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1
            __lowerCAmelCase : List[Any] = []
            # Until we unindent, add backend objects to the list
            while line_index < len(__A ) and len(lines[line_index] ) > 1:
                __lowerCAmelCase : Optional[int] = lines[line_index]
                __lowerCAmelCase : str = _re_single_line_import.search(__A )
                if single_line_import_search is not None:
                    # `from x import a, b` style line: collect all names.
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    # Continuation line of a multi-line import: strip indent and comma.
                    objects.append(line[8:-2] )
                line_index += 1
            if len(__A ) > 0:
                __lowerCAmelCase : str = objects
        else:
            line_index += 1
    return backend_specific_objects
def snake_case_ (name , backend ):
    """Render the dummy placeholder source for one object.

    Upper-case names are treated as constants, lower-case names as functions,
    and anything else as a class (templates are module-level format strings).

    Fix: the original declared two parameters both named ``__A``, which is a
    SyntaxError; the names are restored from the body's usage.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend )
    else:
        return DUMMY_CLASS.format(name , backend )
def snake_case_ (__A : int=None ) -> Any:
    """Build the full text of each ``dummy_<backend>_objects.py`` file, keyed by
    backend name.

    NOTE(review): calls ``read_init`` / ``create_dummy_object``, which are not
    defined under those names in this file (all helpers were renamed to
    ``snake_case_``) — confirm against the original before running.
    """
    if backend_specific_objects is None:
        __lowerCAmelCase : List[Any] = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    __lowerCAmelCase : Optional[int] = {}
    for backend, objects in backend_specific_objects.items():
        # e.g. "torch_and_transformers" -> '["torch", "transformers"]'
        __lowerCAmelCase : Any = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
        __lowerCAmelCase : Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
        __lowerCAmelCase : Any = dummy_file
    return dummy_files
def snake_case_ (__A : Union[str, Any]=False ) -> int:
    """Compare the expected dummy files against those on disk; rewrite them when
    ``overwrite`` is set, otherwise raise on any mismatch.

    NOTE(review): refers to ``create_dummy_files``, ``short_names`` and other
    names that were collapsed by an automated rename — confirm before running.
    """
    __lowerCAmelCase : Optional[Any] = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    __lowerCAmelCase : Optional[Any] = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    __lowerCAmelCase : Any = os.path.join(__A , """utils""" )
    __lowerCAmelCase : Optional[int] = {
        backend: os.path.join(__A , f'''dummy_{short_names.get(__A , __A )}_objects.py''' )
        for backend in dummy_files.keys()
    }
    __lowerCAmelCase : Optional[Any] = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(__A ):
            with open(__A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                __lowerCAmelCase : str = f.read()
        else:
            # Missing file compares as empty so any expected content flags a diff.
            __lowerCAmelCase : Union[str, Any] = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'''Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main '''
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f'''diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` '''
                    """to fix this.""" )
if __name__ == "__main__":
    # CLI entry point: optionally rewrite the dummy files in place.
    # NOTE(review): the parser/args are assigned to `__UpperCAmelCase` but read
    # back as `parser`/`args`, and `check_dummies` is not defined under that
    # name here — an automated rename appears to have broken this block; confirm.
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    __UpperCAmelCase = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
| 218 |
import os
import numpy
import onnx
def snake_case_ (a , b ):
    """Structurally compare two initializer protos while ignoring their names.

    Temporarily blanks both ``name`` fields, compares the protos for equality,
    then restores the original names before returning.

    Fix: the original declared duplicate ``__A`` parameters (SyntaxError) and
    never actually wrote the blank names back onto ``a``/``b`` — every
    assignment was collapsed onto one local, so the comparison still saw the
    original names.
    """
    name_a = a.name
    name_b = b.name
    # Blank the names so only the payload participates in the comparison.
    a.name = """"""
    b.name = """"""
    res = a == b
    # Restore the callers' objects before returning.
    a.name = name_a
    b.name = name_b
    return res
def snake_case_ (node_proto , name , new_name ):
    """Replace every input of ``node_proto`` equal to ``name`` with ``new_name``,
    recursing into the subgraphs of If/Loop control-flow nodes.

    Fix: the original declared three parameters all named ``__A`` (SyntaxError)
    and re-inserted the *old* name instead of the new one; parameter identities
    are restored from the body's usage.
    """
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # Repeated-field protos have no item assignment: emulate
            # input[i] = new_name with an insert followed by a pop.
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        # An If node carries two subgraphs (then/else branches).
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def snake_case_ (graph_proto , name , new_name ):
    """Apply the node-level input rename to every node in ``graph_proto``.

    Fix: the original declared three parameters all named ``__A``
    (SyntaxError); identities are restored from the body's usage.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def snake_case_ (model , model_without_ext , ind_to_replace ):
    """Drop duplicate initializers from ``model_without_ext`` and rewire every
    node input that referenced a removed initializer to its kept twin.

    ind_to_replace: iterable of (i, ref_i) index pairs where initializer ``i``
    duplicates the earlier initializer ``ref_i`` (so ``i > ref_i``).

    Fix: the original declared three parameters all named ``__A``
    (SyntaxError); identities are restored from the body's usage.
    """
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Both views of the model must agree before we mutate anything.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # Point every consumer of the removed tensor at the surviving copy.
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def snake_case_ (__A : List[Any] ) -> str:
    """Load an ONNX model, find byte-identical duplicate initializers, strip the
    duplicates, rewire their consumers, and save an ``optimized_``-prefixed copy.

    NOTE(review): an automated rename collapsed most locals onto
    ``__lowerCAmelCase``/``__A`` here — e.g. ``dup_set.add(__A )`` twice
    (presumably ``add(i)``/``add(j)``), the undefined ``model_file_name`` /
    ``new_model`` reads, and the ``os.path.join(__A , __A )`` arguments. The
    control flow below documents the intended algorithm; confirm the variable
    identities against the original before running.
    """
    __lowerCAmelCase : str = os.path.dirname(__A )
    __lowerCAmelCase : int = os.path.basename(__A )
    __lowerCAmelCase : List[Any] = onnx.load(os.path.join(__A , __A ) )
    __lowerCAmelCase : str = list(model.graph.initializer )
    __lowerCAmelCase : int = set()
    __lowerCAmelCase : Union[str, Any] = {}
    __lowerCAmelCase : Optional[int] = []
    __lowerCAmelCase : Union[str, Any] = 0
    # O(n^2) pairwise scan over initializers for structural duplicates.
    for i in range(len(__A ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(__A ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(__A )
                dup_set.add(__A )
                __lowerCAmelCase : Optional[Any] = inits[j].data_type
                __lowerCAmelCase : int = numpy.prod(inits[j].dims )
                # Bytes saved per element: FLOAT(1)/INT32(6) -> 4, INT64(7)/DOUBLE(11) -> 8.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """ , __A )
                total_reduced_size += mem_size
                __lowerCAmelCase : str = inits[i].name
                __lowerCAmelCase : List[Any] = inits[j].name
                # Remember which kept tensor each duplicate maps onto.
                if name_i in dup_map:
                    dup_map[name_i].append(__A )
                else:
                    __lowerCAmelCase : int = [name_j]
                # (j, i): remove index j, redirect its consumers to index i.
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
    __lowerCAmelCase : Optional[int] = sorted(__A )
    _remove_dup_initializers_from_model(__A , __A , __A )
    __lowerCAmelCase : str = """optimized_""" + model_file_name
    __lowerCAmelCase : List[Any] = os.path.join(__A , __A )
    onnx.save(__A , __A )
    return new_model
| 218 | 1 |
'''Lazy import shim for the BigBird-Pegasus model family: the heavy modeling
module is only imported on first attribute access (or under TYPE_CHECKING).'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# NOTE(review): both the import-structure dict and the modeling list below are
# bound to `UpperCAmelCase_` (the second assignment overwrites the first), yet
# `_LazyModule` is handed `_import_structure`, which is never defined — an
# automated rename appears to have broken this module; confirm against the
# original (`_import_structure["modeling_bigbird_pegasus"] = [...]`).
UpperCAmelCase_ : Optional[int] = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    UpperCAmelCase_ : Optional[Any] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCAmelCase_ ( A ):
    """Binarize ("bertarize") a fine-pruned checkpoint: apply the chosen pruning
    mask to every prunable weight and save a standalone pruned state dict.

    A: parsed CLI namespace with ``pruning_method``, ``threshold``,
       ``model_name_or_path`` and ``target_model_path`` attributes.

    Fix: the original collapsed every local onto ``_a`` — the pruned state
    dict was never accumulated, ``prefix_`` was read before being defined in
    the topK/sigmoied/l0 branches, and the l0 bounds ``l``/``r`` were lost by
    ``_a, _a = -0.1, 1.1``. Variable identities are restored below.
    """
    pruning_method = A.pruning_method
    threshold = A.threshold
    model_name_or_path = A.model_name_or_path.rstrip('/' )
    target_model_path = A.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings/LayerNorm/pooler are never pruned: copy as-is.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            # Task heads are kept dense.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            # Only weight matrices carry masks; biases are copied.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Score tensors themselves are dropped from the output.
                    continue
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                log_alpha = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch bounds from the L0 regularization paper.
                l, r = -0.1, 1.1
                s = torch.sigmoid(log_alpha )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )

    if target_model_path is None:
        # Default: sibling folder named bertarized_<model_dir>.
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'''bertarized_{os.path.basename(model_name_or_path )}''' )

    if not os.path.isdir(target_model_path ):
        # Copy tokenizer/config files alongside the pruned weights.
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'''\nCreated folder {target_model_path}''' )

    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    # CLI entry point for the bertarize script.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            # Fix: the original embedded "\tau", which Python renders as a
            # literal TAB + "au"; spell the symbol out instead.
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold `tau` against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        # Fix: help text was copy-pasted from --model_name_or_path.
        help="Folder where the pruned model will be saved; defaults to `bertarized_<model_name>` next to the input model",
    )
    args = parser.parse_args()
    # Fix: the original called the undefined `main(args)`; the worker function
    # in this file is named `UpperCAmelCase_`.
    UpperCAmelCase_(args)
| 120 | 1 |
from __future__ import annotations
def lowercase ( voltage , current , resistance ):
    """Apply Ohm's law: exactly one of the three arguments must be 0 (the
    unknown) and is returned, computed from the other two, as a one-entry dict.

    Raises ValueError when not exactly one argument is 0 or when resistance is
    negative.

    Fix: the original signature declared three parameters all named ``a``
    (a SyntaxError) while the body used voltage/current/resistance; the
    parameter names are restored from the body.
    """
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance < 0:
        raise ValueError("Resistance cannot be negative" )
    if voltage == 0:
        # V = I * R
        return {"voltage": float(current * resistance )}
    elif current == 0:
        # I = V / R
        return {"current": voltage / resistance}
    elif resistance == 0:
        # R = V / I
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 140 |
from collections import defaultdict
class _UpperCAmelCase :
def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int):
SCREAMING_SNAKE_CASE_ :Dict = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
SCREAMING_SNAKE_CASE_ :Optional[int] = [
[-1 for i in range(total + 1)] for j in range(2 ** len(UpperCAmelCase))
]
SCREAMING_SNAKE_CASE_ :Tuple = defaultdict(UpperCAmelCase) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
SCREAMING_SNAKE_CASE_ :Tuple = (1 << len(UpperCAmelCase)) - 1
def _snake_case ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Dict):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
SCREAMING_SNAKE_CASE_ :Optional[int] = self.count_ways_until(UpperCAmelCase , task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1)
# save the value.
SCREAMING_SNAKE_CASE_ :Union[str, Any] = total_ways_util
return self.dp[mask][task_no]
def _snake_case ( self : str , UpperCAmelCase : Optional[int]):
# Store the list of persons for each task
for i in range(len(UpperCAmelCase)):
for j in task_performed[i]:
self.task[j].append(UpperCAmelCase)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1)
if __name__ == "__main__":
    # Demo: 3 persons, 5 tasks; the expected answer for this instance is 10.
    # NOTE(review): the locals are bound to `SCREAMING_SNAKE_CASE__` but read
    # back as `task_performed`/`total_tasks`, and `AssignmentUsingBitmask` is
    # not the class name defined above — an automated rename appears to have
    # broken this block; confirm.
    SCREAMING_SNAKE_CASE__ = 5 # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    SCREAMING_SNAKE_CASE__ = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 140 | 1 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Repository root, assuming the script is run from the repo root.
lowerCamelCase : Any = '''.'''
if __name__ == "__main__":
    # NOTE(review): every local is bound to `lowerCamelCase` but later read
    # back under its original name (REPO_PATH, doctest_file_path, line, path,
    # non_existent_paths, all_paths) — an automated rename appears to have
    # broken this script; confirm against the original.
    lowerCamelCase : Dict = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
    lowerCamelCase : Optional[Any] = []
    lowerCamelCase : str = []
    with open(doctest_file_path) as fp:
        for line in fp:
            lowerCamelCase : Union[str, Any] = line.strip()
            lowerCamelCase : Dict = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                # Collect entries that no longer exist on disk.
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        lowerCamelCase : Tuple = '''\n'''.join(non_existent_paths)
        raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        # The list must stay alphabetically sorted for easy review.
        raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 367 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for DownBlock2D.

    Fixes: base class `A` was undefined (the mixin is imported at file top);
    both class attributes were named `lowerCAmelCase__` (second overwrote the
    first) while the mixin reads `block_class`/`block_type`; the test passed
    the undefined `_lowerCAmelCase` instead of the slice it just built.
    """

    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for ResnetDownsampleBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding `lowerCAmelCase__`
    attributes -> `block_class`/`block_type`; undefined `_lowerCAmelCase`
    argument -> the slice built on the previous line.
    """

    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for AttnDownBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding `lowerCAmelCase__`
    attributes -> `block_class`/`block_type`; undefined `_lowerCAmelCase`
    argument -> the slice built on the previous line.
    """

    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for CrossAttnDownBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; the `__lowercase =3_2` line is
    reconstructed as setting the cross-attention dim (TODO confirm key).
    """

    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        '''Extend the common init args with the text-encoder hidden size.'''
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 3_2  # NOTE(review): reconstructed key — confirm
        return init_dict, inputs_dict

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for SimpleCrossAttnDownBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; the undefined `_lowerCAmelCase`
    flag/argument values reconstructed (TODO confirm against history).
    """

    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        '''Cross-attention blocks also need encoder hidden states.'''
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        '''Extend the common init args with the text-encoder hidden size.'''
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 3_2  # NOTE(review): reconstructed key — confirm
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent')
    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for SkipDownBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; undefined `_lowerCAmelCase` values
    reconstructed (skip blocks need a skip sample input).
    """

    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        '''Skip blocks additionally consume a skip sample.'''
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for AttnSkipDownBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; undefined `_lowerCAmelCase` values
    reconstructed (skip blocks need a skip sample input).
    """

    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        '''Skip blocks additionally consume a skip sample.'''
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for DownEncoderBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; undefined `_lowerCAmelCase` flag
    reconstructed (encoder blocks take no time embedding — TODO confirm).
    """

    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        '''Encoder blocks do not consume a time embedding.'''
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        '''Encoder blocks are built from explicit channel counts.'''
        init_dict = {
            'in_channels': 3_2,
            'out_channels': 3_2,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class _UpperCamelCase ( UNetBlockTesterMixin , unittest.TestCase ):
    """Output-slice regression test for AttnDownEncoderBlock2D.

    Fixes: undefined base `A` -> imported mixin; colliding attribute/method
    names restored to the mixin's hooks; undefined `_lowerCAmelCase` flag
    reconstructed (encoder blocks take no time embedding — TODO confirm).
    """

    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        '''Encoder blocks do not consume a time embedding.'''
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        '''Encoder blocks are built from explicit channel counts.'''
        init_dict = {
            'in_channels': 3_2,
            'out_channels': 3_2,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        '''Compare the block output against the recorded reference slice.'''
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlockaDTests(A, unittest.TestCase):
    """Output test for the UNetMidBlockaD "mid" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        """Init kwargs and dummy inputs for the shared block tests."""
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class UNetMidBlockaDCrossAttnTests(A, unittest.TestCase):
    """Output test for the UNetMidBlockaDCrossAttn "mid" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        """Extend the common init kwargs with the cross-attention width."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): the original dropped the assignment target and just
        # bound 32 to a throwaway name; upstream sets the cross-attention
        # dimension here -- confirm.
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class UNetMidBlockaDSimpleCrossAttnTests(A, unittest.TestCase):
    """Output test for the UNetMidBlockaDSimpleCrossAttn "mid" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        """Dummy input including encoder hidden states (cross-attention block)."""
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Extend the common init kwargs with the cross-attention width."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlockaDTests(A, unittest.TestCase):
    """Output test for the UpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class ResnetUpsampleBlockaDTests(A, unittest.TestCase):
    """Output test for the ResnetUpsampleBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class CrossAttnUpBlockaDTests(A, unittest.TestCase):
    """Output test for the CrossAttnUpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Extend the common init kwargs with the cross-attention width."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlockaDTests(A, unittest.TestCase):
    """Output test for the SimpleCrossAttnUpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input with residual hidden states and encoder hidden states."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        """Extend the common init kwargs with the cross-attention width."""
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlockaDTests(A, unittest.TestCase):
    """Output test for the AttnUpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlockaDTests(A, unittest.TestCase):
    """Output test for the SkipUpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class AttnSkipUpBlockaDTests(A, unittest.TestCase):
    """Output test for the AttnSkipUpBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input including the residual hidden-states tuple up blocks consume."""
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlockaDTests(A, unittest.TestCase):
    """Output test for the UpDecoderBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input without a time embedding (decoder blocks take none)."""
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """Init kwargs and dummy inputs for the shared block tests."""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)
class AttnUpDecoderBlockaDTests(A, unittest.TestCase):
    """Output test for the AttnUpDecoderBlockaD "up" block."""

    # NOTE(review): attribute names restored from the tester-mixin convention
    # (the original bound both values to one name) -- confirm against mixin `A`.
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        """Dummy input without a time embedding (decoder blocks take none)."""
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        """Init kwargs and dummy inputs for the shared block tests."""
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        """Check the block's output slice against the recorded reference."""
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 474 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a denoising UNet and a scheduler.

    NOTE(review): the original declared ``class a__(a__)`` (self-referencing base,
    a NameError) and gave every parameter the same placeholder name (a
    SyntaxError). The base class and parameter names below are restored from the
    imports and from how the body uses each value -- confirm against the
    upstream pipeline.
    """

    def __init__(self, unet, scheduler):
        """Register the denoising UNet and the noise scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=100, generator=None, audio_length_in_s=None, return_dict=True):
        """Generate ``batch_size`` audio samples by iterative denoising.

        Args:
            batch_size: number of audio samples to generate.
            num_inference_steps: number of scheduler denoising steps.
            generator: torch ``Generator`` (or list of them, one per sample).
            audio_length_in_s: requested clip length; defaults to the length
                the UNet was configured for.
            return_dict: if False, return a plain ``(audio,)`` tuple.

        Returns:
            ``AudioPipelineOutput`` with the generated audio, or a tuple.
        """
        if audio_length_in_s is None:
            # Default to the clip length the UNet was trained on.
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # Each up block doubles the resolution, so the sample length must
        # survive the full downsampling path.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the downscale factor so the
            # UNet can process it; the result is trimmed back afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added to satisfy the downscale-factor constraint.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# NOTE(review): the obfuscated original assigned every value to one name,
# leaving `_import_structure`, `MTaTokenizer` and `MTaTokenizerFast` (all used
# below) undefined and never installing the lazy module. Distinct names are
# restored here.

# mT5 reuses the T5 tokenizer; fall back to dummy objects when the optional
# backends are missing so importing this module never fails.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Mapping of submodule name -> public names, consumed by _LazyModule.
# NOTE(review): keys must match the actual module file names; the
# TYPE_CHECKING imports below use `configuration_mta`-style names -- confirm
# against the package layout.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
| 395 | 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """Build an ``XCLIPConfig`` for the given checkpoint name.

    Args:
        model_name: checkpoint identifier such as ``"xclip-base-patch32"``.
        num_frames: number of video frames the vision tower consumes.

    Returns:
        An ``XCLIPConfig`` combining the text and vision configs.

    NOTE(review): the function name and the assignment targets were lost in
    obfuscation; they are restored from the call sites and the upstream
    conversion script -- confirm attribute names against XCLIPConfig docs.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """Translate an original X-CLIP parameter name into the HF naming scheme.

    Args:
        name: parameter name from the original checkpoint.

    Returns:
        The renamed parameter key. Each ``replace`` rebinds ``name`` (the
        obfuscated original dropped these targets, so every rename was lost).
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    """Map an original X-CLIP state dict onto the HF naming scheme.

    Fused ``attn.in_proj`` QKV tensors are split into separate q/k/v
    projections; everything else goes through ``rename_key``.

    NOTE(review): the obfuscated original discarded every destination key
    (slices were bound to a throwaway name); the f-string keys below are
    restored from the upstream conversion script -- confirm against it.

    Args:
        orig_state_dict: checkpoint state dict (mutated and returned).
        config: ``XCLIPConfig`` providing the hidden sizes used for splitting.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # These two projections are stored transposed in the original.
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    """Download the sample 'eating spaghetti' clip with the given frame count.

    Args:
        num_frames: 8, 16 or 32 frames.

    Returns:
        The video as a list of frames (numpy arrays).

    Raises:
        ValueError: for unsupported frame counts (the original fell through
        with an undefined filename).
    """
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"Unsupported number of frames: {num_frames}")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF format and verify it.

    NOTE(review): the original signature repeated one placeholder name for all
    parameters (a SyntaxError) and dropped most assignment targets; names are
    restored from the body's remaining references and the upstream script.

    Args:
        model_name: key into the checkpoint URL table below.
        pytorch_dump_folder_path: where to save the converted model (optional).
        push_to_hub: push model/processor/tokenizer to the hub when True.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    # Frame count is encoded in the model name.
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive links need gdown; GitHub links can go through torch.hub.
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # Position ids are buffers created by the HF model, not checkpoint weights.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 421 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
snake_case__ : Dict = logging.getLogger(__name__)
def parse_args(args=None):
    """Parse CLI arguments for the TFRecord-shard preparation script.

    Args:
        args: optional list of argument strings; defaults to ``sys.argv``.
            Added (backward-compatibly) so the parser can be exercised in tests.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    return parser.parse_args(args)
def tokenize_function(tokenizer):
    """Return a ``datasets.map``-compatible closure that tokenizes the 'text' column.

    Args:
        tokenizer: callable applied to the batch's list of texts.
    """
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into TFRecord ``tf.train.Example`` byte strings.

    Args:
        tokenized_data: mapping with equal-length 'input_ids' and
            'attention_mask' lists of int sequences.

    Returns:
        List of serialized protobuf byte strings, one per sample.

    NOTE(review): the obfuscated original used ``intaa_list``/``IntaaList``,
    which are not real ``tf.train`` names; the int64 API names are restored.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """Tokenize the dataset, group it into fixed-length chunks, and write TFRecord shards.

    Args:
        args: namespace produced by ``parse_args``.
    """
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    # NOTE(review): the original called an undefined name `main`; the entry
    # point defined above is `__lowerCamelCase`, which takes the parsed CLI
    # namespace as its single argument.
    args = parse_args()
    __lowerCamelCase(args)
| 278 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''',
    [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ],
)
def _lowerCAmelCase(tmp_path_factory, files):
    """DatasetInfosDict.from_directory must read dataset_size from README.md
    YAML front matter and/or the legacy dataset_infos.json, whichever exists.

    Fix(review): the original declared both parameters with the same name
    (a SyntaxError) while the body read the pytest fixture names.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''''')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''') as f:
            f.write('''{"default": {"dataset_size": 42}}''')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''',
    [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''')}), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42,
        ),
    ],
)
def _lowerCAmelCase(tmp_path, dataset_info):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    Fix(review): duplicate parameter names (SyntaxError) and locals that were
    never bound under the names the assertions read; restored.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json'''))
def _lowerCAmelCase():
    """DatasetInfo._to_yaml_dict keeps exactly the whitelisted keys and the
    result survives a YAML dump/load round-trip.

    Fix(review): the original assigned every local to a throwaway name while
    later lines read `dataset_info_yaml_dict` etc.; restored.
    """
    dataset_info = DatasetInfo(
        description='''foo''',
        citation='''bar''',
        homepage='''https://foo.bar''',
        license='''CC0''',
        features=Features({'''a''': Value('''int32''')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='''builder''',
        config_name='''config''',
        version='''1.0.0''',
        splits=[{'''name''': '''train''', '''num_examples''': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def _lowerCAmelCase():
    """A default-constructed DatasetInfo serializes to an empty YAML dict.

    Fix(review): locals were bound to throwaway names while the assertion
    read `dataset_info_yaml_dict`; restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''',
    [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()}),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()}),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''')}), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42),
                '''v2''': DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def _lowerCAmelCase(tmp_path, dataset_infos_dict):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    Fix(review): duplicate parameter names (SyntaxError) and mangled locals;
    restored so the expected dict is actually rebuilt before comparison.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md'''))
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class a ( PretrainedConfig ):
    """Configuration for MarkupLM: a BERT-style encoder augmented with XPath
    embeddings that encode each node's position in the HTML/XML tree.

    Fix(review): the original declared every __init__ parameter under the same
    name (a SyntaxError), assigned the hyperparameters to a local instead of
    ``self``, and inherited from an undefined name — the import at the top of
    this file provides ``PretrainedConfig``.
    """

    model_type = """markuplm"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath tree-position embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 219 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase : Tuple = random.Random()
if is_torch_available():
import torch
def UpperCamelCase_(shape, scale=1.0, rng=None, name=None):
    """Create a (shape[0] x shape[1]) nested list of random floats in [0, scale).

    `name` is accepted for API compatibility with sibling helpers and unused.

    Fix(review): the original declared all four parameters under the same name
    (a SyntaxError) and its default RNG referenced the undefined `global_rng`;
    the module-level RNG defined at the top of this file is bound to
    `UpperCamelCase`.
    """
    if rng is None:
        rng = UpperCamelCase
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class A__ ( unittest.TestCase ):
    """Builds constructor kwargs and dummy speech inputs for the
    ASTFeatureExtractor tests below.

    Fix(review): the original __init__ declared every parameter under one name
    (a SyntaxError) and bound the attributes to a local instead of ``self``;
    both helper methods shared the name ``_UpperCamelCase`` so only the second
    survived, while callers in this file use ``prepare_feat_extract_dict``;
    the batch builder called the undefined ``floats_list`` (the helper above
    is bound to ``UpperCamelCase_``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between successive input lengths when sizes must differ
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of float speech inputs (all equal length, or
        strictly increasing lengths), optionally as numpy arrays."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = UpperCamelCase_((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(UpperCamelCase_((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A__ ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """Tests for ASTFeatureExtractor.

    Fix(review): all five methods shared the name ``_UpperCamelCase`` so only
    the last survived; the class attribute read as ``self.feature_extraction_class``
    was bound to ``_lowercase``; the mixin base was replaced by a self-reference;
    ``floats_list``/``num_samples``/``np.floataa`` were undefined. Restored to
    the names the code actually reads.
    """

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        # NOTE(review): `ASTFeatureExtractionTester` is not defined in this
        # file — the tester class above is bound to `A__`, which this class
        # shadows; confirm the intended reference when deobfuscating.
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [UpperCamelCase_((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [UpperCamelCase_((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float64)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 37 |
from collections.abc import Callable
def lowerCamelCase__(function: Callable[[float], float], a: float, b: float) -> float:
    """Find an approximate root of `function` on [a, b] by bisection.

    Requires function(a) and function(b) to have opposite signs (or either
    endpoint to be an exact root). The bracket is halved until its width
    drops below 1e-7.

    Raises:
        ValueError: if function(a) and function(b) share a non-zero sign.

    Fix(review): the original declared all three parameters under one name
    (a SyntaxError) and bound every local to one throwaway name while the
    body read `function`/`a`/`b`/`start`/`end`/`mid`; restored.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def lowerCamelCase__(x: float) -> float:
    """Sample polynomial f(x) = x^3 - 2x - 5 used by the demo below.

    Fix(review): the body read `x` but the parameter was named differently,
    so every call raised NameError; parameter restored to `x`.
    """
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # NOTE(review): `bisection` and `f` are not defined in this file — the two
    # functions above are BOTH named `lowerCamelCase__`, so the second
    # definition shadows the first and this demo raises NameError as written.
    # Restoring distinct names (bisection / f) would fix both the shadowing
    # and this call; left unchanged here because that fix spans two definitions.
    print(bisection(f, 1, 10_00))
    import doctest
    doctest.testmod()
| 290 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __lowercase :
    """Builds a small LayoutLMConfig plus dummy inputs and runs shape checks
    for each TF LayoutLM head (driven by the test class below).

    Fix(review): the original __init__ declared every parameter under one name
    (a SyntaxError) and bound the attributes to a local instead of ``self``;
    all seven methods shared the name ``__a`` so only the last survived, while
    callers use ``prepare_config_and_inputs`` / ``create_and_check_*``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=10_00,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels) with legal bboxes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class __lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Shape/config tests for the TF LayoutLM model family.

    Fix(review): the original inherited from the undefined name ``_A`` (the
    imports above provide TFModelTesterMixin and PipelineTesterMixin), bound
    every class attribute to ``lowercase``, and named every method ``__a`` so
    only the last survived; restored to the attribute/method names the mixins
    and unittest discovery rely on.
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFLayoutLMModel,
            'fill-mask': TFLayoutLMForMaskedLM,
            'text-classification': TFLayoutLMForSequenceClassification,
            'token-classification': TFLayoutLMForTokenClassification,
            'zero-shot': TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        # NOTE(review): `TFLayoutLMModelTester` is not defined in this file —
        # the tester class above is bound to `__lowercase`, which this class
        # shadows; confirm the intended reference when deobfuscating.
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''Onnx compliancy broke with TF 2.10''')
    def test_onnx_compliancy(self):
        pass
def __UpperCAmelCase():
    """Return a fixed two-example LayoutLM batch:
    (input_ids, attention_mask, bbox, token_type_ids, labels).

    Fix(review): the original bound all five tensors to one throwaway local
    while the return statement read the real names; restored.
    """
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
# Readable module-level alias for the batch builder defined above. The alias
# is needed because the dunder-prefixed name `__UpperCAmelCase` would undergo
# private name mangling if referenced inside the class body below.
prepare_layoutlm_batch_inputs = __UpperCAmelCase


@require_tf
class __lowercase ( unittest.TestCase ):
    """Slow integration tests against microsoft/layoutlm-base-uncased.

    Fix(review): the original named all four methods ``__a`` (only the last
    survived, and none were unittest-discoverable) and called the undefined
    name ``prepare_layoutlm_batch_inputs``; the alias above binds it to the
    batch builder defined in this file.
    """

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1E-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''', num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
from __future__ import annotations
from collections.abc import MutableSequence
class __lowercase :
    """A polynomial with float coefficients; ``coefficients[i]`` multiplies ``x**i``.

    Fix(review): the original __init__ declared both parameters under one name
    (a SyntaxError) and never set the attributes on ``self``; method bodies
    read ``polynomial_a`` while the parameter had another name; three methods
    shared the name ``__a``; constructor calls referenced the undefined name
    ``Polynomial`` (the class name ``__lowercase`` cannot be used inside its
    own body because of private name mangling, so ``self.__class__`` is used);
    and ``__str__`` mis-indented the sign logic so a leading positive term
    printed as negative.
    """

    def __init__(self, degree: int, coefficients: 'MutableSequence[float]') -> None:
        """Store ``degree`` and a defensive copy of ``coefficients``.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: '__lowercase') -> '__lowercase':
        """Return the sum; its degree is the larger of the two operands'."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return self.__class__(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return self.__class__(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: '__lowercase') -> '__lowercase':
        """Return self - polynomial_2, implemented as self + (-1) * other."""
        return self + polynomial_2 * self.__class__(0, [-1])

    def __neg__(self) -> '__lowercase':
        """Return the negation (every coefficient sign-flipped)."""
        return self.__class__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: '__lowercase') -> '__lowercase':
        """Return the product via coefficient convolution."""
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return self.__class__(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at ``substitution``."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped."""
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # only separate from a preceding term; a leading positive
                # term carries no sign
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> '__lowercase':
        """Return the formal derivative (degree lowered by one)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return self.__class__(self.degree - 1, coefficients)

    def integral(self, constant=0) -> '__lowercase':
        """Return an antiderivative with integration constant ``constant``."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return self.__class__(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        """Equal iff same degree and identical coefficients."""
        if not isinstance(polynomial_2, type(self)):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a Pascal's-triangle DP: O(n*r) time, O(r) space."""
    # c[j] holds C(i, j) for the row currently being built.
    c = [0 for _ in range(r + 1)]
    # C(i, 0) is always 1.
    c[0] = 1
    for i in range(1, n + 1):
        # Update right-to-left so c[j - 1] still holds the previous row's value.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection: locate objects described by free-text
    candidate labels in an image. PyTorch only (e.g. OWL-ViT checkpoints)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Detect ``candidate_labels`` in ``image``.

        ``image`` may be a URL/path, a PIL image, or a list of
        ``{"image": ..., "candidate_labels": ...}`` dicts. ``text_queries``
        is accepted as a legacy alias for ``candidate_labels``.
        """
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        # Only post-processing takes parameters (score threshold / top_k cap).
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunk pipeline)."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Pop the bookkeeping keys so only real model arguments are forwarded.
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert raw detections into ``{score, label, box}`` dicts, sorted by
        descending score and optionally capped at ``top_k`` entries."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an ``(xmin, ymin, xmax, ymax)`` tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface over a Hugging Face Hub dataset repository
    (legacy ``hf-legacy`` protocol)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        """
        Args:
            repo_info: repository metadata; ``siblings`` lists the repo files.
            token: optional auth token forwarded with download requests.
        """
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        # Lazily build a path -> entry cache from the repo's file list.
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory as well.
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        """Open a repo file by streaming it from the Hub."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """Return the cache entry for ``path`` or raise ``FileNotFoundError``."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of ``path`` (entries when ``detail`` is
        True, sorted names otherwise)."""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print each vertex index alongside its shortest distance from ``src``."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if any edge can still be relaxed after |V|-1 rounds,
    i.e. the graph contains a negative-weight cycle."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    """Single-source shortest paths via Bellman-Ford.

    Args:
        graph: edge list; each edge is ``{"src": u, "dst": v, "weight": w}``.
        vertex_count: number of vertices.
        edge_count: number of edges.
        src: source vertex.

    Returns:
        Shortest distance from ``src`` to every vertex (inf if unreachable).

    Raises:
        Exception: if the graph contains a negative-weight cycle.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read the graph as an edge list, then run Bellman-Ford.
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

# Languages covered by the MuST-C multilingual checkpoints.
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Speech2Text tokenizer: a SentencePiece model plus a JSON vocabulary,
    with optional ``<lang:xx>`` target-language prefix tokens (MuST-C)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # Token ids prepended to every encoded sequence (the target-language code).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Make the target language's ``<lang:xx>`` token the sequence prefix."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Unknown tokens map to the unk id.
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Decode sentencepiece tokens back to text, keeping special tokens
        verbatim and optionally upper-casing the decoded spans."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """``[lang_code] tokens_0 (tokens_1) [eos]``."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a regular sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # The SentencePiece processor is not picklable; drop it here and
        # rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and the sentencepiece model into ``save_directory``."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from ``path`` with the given processor kwargs."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """Read and parse a JSON file."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize ``data`` to ``path`` as pretty-printed (indent=2) JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on all given ``days`` of the year.

    Args:
        days: days (1..365) on which travel happens.
        costs: three ticket prices: [1-day, 7-day, 30-day].

    Returns:
        Cheapest total ticket cost covering every travel day.

    Raises:
        ValueError: on malformed ``days``/``costs`` or out-of-range days.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Best cost to cover all travel days from `index` onward.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count text for a Google Scholar lookup.

    Fetches ``base_url`` with ``params`` and extracts the third anchor of the
    result footer (the "Cited by N" link).
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example lookup: print the citation count for this paper.
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array: z[i] is the length of the longest substring
    starting at i that is also a prefix of ``input_str`` (z[0] stays 0)."""
    z_result = [0 for i in range(len(input_str))]

    # [left_pointer, right_pointer] is the rightmost prefix-match interval
    # found so far.
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """True while the match starting at ``i`` can be extended by one char."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-array of
    the concatenation ``pattern + input_str``."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader; submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _SCREAMING_SNAKE_CASE(nums: list[int]) -> float:
    """Return the mean absolute deviation of ``nums``.

    Raises:
        ValueError: if ``nums`` is empty.
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> public names it provides.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader; submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of '0'/'1' bits
    (8 bits per byte, MSB first). Exits the process if the file is unreadable."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZW-encoded bit string back into its original bit string.

    The lexicon starts as {"0": "0", "1": "1"} and is rebuilt (keys prefixed
    with "0") every time its size crosses a power of two, mirroring the
    variable-width codes emitted by the compressor.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the code width grows, every existing key gains a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write a '0'/'1' bit string to ``file_path`` as bytes.

    The stream is split into 8-bit chunks; the final chunk is padded with a
    "1" marker followed by zeros (or a fresh "10000000" byte when the stream
    already ends on a byte boundary). Exits the process on I/O failure.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # NOTE(review): only elements before the last are written, as in
            # the original — verify the trailing-marker convention against the
            # matching compressor before changing this.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix the compressor prepends: count the leading
    zeros, drop them, then drop that many bits plus the '1' marker."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZW file at ``source_path`` into ``destination_path``.

    NOTE(review): the name ``compress`` is kept for the existing CLI entry
    point even though this driver performs decompression.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # CLI: decompress argv[1] into argv[2].
    compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers <= ``limit`` that are
    NOT the sum of two abundant numbers.

    First sieve the sum of proper divisors for every n <= limit, then walk n
    upward, collecting abundant numbers and skipping any n expressible as
    abundant + abundant.
    """
    # sum_divs[n] accumulates the proper divisors of n (1 is always one).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            # i and k are a complementary divisor pair of k * i.
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
    print(solution())
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; the menu falls back to a
# plain numeric prompt there because raw key handling is unavailable.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """Interactive terminal bullet menu driven by arrow/number keys.

    NOTE(review): key handling (``handle_input``, ``current_selection``) is
    provided by the ``input.register``/``input.mark`` machinery of this
    package — confirm against the ``input`` helper module.
    """

    def __init__(self, prompt: str = None, choices: list = None):
        self.position = 0
        # Avoid the shared-mutable-default pitfall of `choices: list = []`.
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        """Write the choice text at ``index`` (green on non-Windows terminals)."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one menu row, with the arrow marker on the active row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the highlight up/down by ``num_spaces``, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        """Confirm the current row and return its index."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        """Ctrl-C: restore the cursor position, then abort."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, loop on key input, and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu (all rows plus the prompt line).
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the custom markers used by the test suite so pytest does not
    warn about unknown marks."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def a ( parser ) -> Tuple:
    """Forward pytest's option registration to transformers' shared helper.

    Fix: the helper was called with the undefined name ``_lowerCamelCase``;
    the pytest parser passed to this hook is the intended argument.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def a ( terminalreporter ) -> Union[str, Any]:
    """Emit transformers' extended test reports when ``--make-reports`` is set.

    Fix: both the reporter and the report id were previously passed as the
    undefined name ``_lowerCamelCase``.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def a ( A__ , A__ ) -> Optional[int]:
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
a_ :Optional[Any] = doctest.register_optionflag('IGNORE_RESULT')
a_ :Dict = doctest.OutputChecker
class lowercase ( lowercase__ ):
def lowercase__ ( self : Dict , _lowercase : List[str] , _lowercase : Dict , _lowercase : int ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _lowercase , _lowercase , _lowercase )
# Install the custom checker so doctest honours the IGNORE_RESULT flag.
# Fix: the original assigned the undefined name ``CustomOutputChecker`` to a
# throwaway variable; the class defined above is ``lowercase`` and the usual
# pattern patches it into ``doctest``.
doctest.OutputChecker = lowercase
a_ :Any = HfDoctestModule
a_ :int = HfDocTestParser
| 704 |
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Count subsets of distinct natural-number ``power``-th powers that sum to ``needed_sum``.

    Returns ``(current_sum, solutions_count)`` so the running sum and the
    solution counter thread through the recursion.

    Fix: this helper was also named ``a`` with five identically named
    parameters (a SyntaxError) while the entry point below called the
    undefined name ``backtrack``.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def a(needed_sum, power):
    """Return the number of ways ``needed_sum`` can be written as a sum of
    distinct ``power``-th powers of natural numbers.

    Raises ValueError when the arguments fall outside the supported ranges.
    """
    if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.')
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 250 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case_ ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet dataset builder.

    Fix: the three fields were all named ``SCREAMING_SNAKE_CASE`` so only one
    survived; the builder below reads ``config.batch_size``,
    ``config.columns`` and ``config.features``, which fixes their names.
    """

    batch_size: int = 10000               # rows per yielded Arrow record batch
    columns: Optional[List[str]] = None   # optional column projection
    features: Optional[datasets.Features] = None  # explicit schema override
class snake_case_ ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams Parquet files as Arrow tables.

    NOTE(review): in the original all four methods shared the name
    ``snake_case__`` (only the last would survive); they are restored here to
    the ``ArrowBasedBuilder`` hook names that the base class and the
    ``self._cast_table`` call below require -- confirm against ``datasets``.
    """

    # NOTE(review): presumably the builder-config hook; the original
    # referenced the undefined name ``ParquetConfig`` -- the config dataclass
    # defined above is the intended target.
    SCREAMING_SNAKE_CASE : Any = snake_case_

    def _info( self ) ->datasets.DatasetInfo:
        """Dataset metadata; features may be None until inferred from a file."""
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager ) ->Optional[Any]:
        """Resolve ``data_files`` into split generators, inferring features from the first file."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        # Fix: the inferred schema was assigned to a throwaway
                        # local; it must be stored on ``self.info``.
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table( self , pa_table ) ->pa.Table:
        """Cast a table to the declared features when they are known."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table

    def _generate_tables( self , files ) ->int:
        """Yield ``(key, table)`` pairs, one table per Parquet record batch."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
'''simple docstring'''
def apply_table(inp, table):
    """Pick the characters of ``inp`` at the 1-based positions listed in ``table``."""
    res = ''''''
    for position in table:
        res += inp[position - 1]
    return res


def left_shift(data):
    """Rotate the bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two bit-strings (raises IndexError if ``b`` is shorter than ``a``)."""
    res = ''''''
    for i in range(len(a)):
        res += "0" if a[i] == b[i] else "1"
    return res


def apply_sbox(s, data):
    """Look up the 4-bit block ``data`` in S-box ``s``.

    Row is bits 1 and 4, column is bits 2 and 3; the result is the selected
    entry as a 1- or 2-character binary string.
    """
    row = int('''0b''' + data[0] + data[-1] , 2)
    col = int('''0b''' + data[1:3] , 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sb, key, message, pa_table=(2, 4, 3, 1)):
    """One Feistel round of simplified DES on the 8-bit ``message``.

    ``pa_table`` defaults to the standard s-DES P4 permutation so the round is
    self-contained; callers may override it.

    Fix: the five original helpers were all named ``_a`` with duplicate
    parameter names (a SyntaxError) while the rest of the file calls them by
    the names restored here.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    # Pad each half-round output to two bits before the P4 permutation.
    l = '''0''' * (2 - len(l)) + l  # noqa: E741
    r = '''0''' * (2 - len(r)) + r
    temp = apply_table(l + r, pa_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    # NOTE(review): in the mangled original every module-level name below was
    # ``A`` (each assignment overwrote the previous one); the distinct names
    # here are reconstructed from the standard simplified-DES layout -- confirm.
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    sa_b = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sa_b, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sa_b, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sa_b, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sa_b, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decypting is:""", PT)
| 349 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: map submodules to their public names.
# Fix: the dict, the torch-only list, and the _LazyModule result were all
# assigned to one name (``_SCREAMING_SNAKE_CASE``), so the dict was clobbered
# and the final call referenced the undefined name ``_import_structure``.
_import_structure = {
    """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
    """processing_mgp_str""": ["""MgpstrProcessor"""],
    """tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["""modeling_mgp_str"""] = [
        """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MgpstrModel""",
        """MgpstrPreTrainedModel""",
        """MgpstrForSceneTextRecognition""",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 614 |
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 10_00 , SCREAMING_SNAKE_CASE = True ) -> int:
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def get_avg ( number_a , number_b ) -> int:
    """Integer midpoint of two numbers.

    Fix: originally named ``__UpperCamelCase`` with two identically named
    parameters while the search below calls ``get_avg``.
    """
    return int((number_a + number_b) / 2 )


def guess_the_number ( lower , higher , to_guess ) -> None:
    """Binary-search for ``to_guess`` inside ``(lower, higher)``, printing each guess.

    Raises ValueError when the bounds are inverted or ``to_guess`` is outside
    the open interval.  Fix: the original def reused one parameter name three
    times (a SyntaxError) and was named ``__UpperCamelCase`` although
    ``main`` calls ``guess_the_number``.
    """
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )

    def answer(number ) -> str:
        # Compare one guess against the secret value.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            # Guess was below the target: raise the lower bound.
            last_lowest = number
        elif answer(number ) == "high":
            # Guess was above the target: lower the upper bound.
            last_highest = number
        else:
            break
    print(F'''guess the number : {last_numbers[-1]}''' )
    print(F'''details : {last_numbers!s}''' )
def main ( ) -> None:
    """Prompt for bounds and a target, then run the guessing search.

    Fix: this def was named ``__UpperCamelCase`` although the guard below
    calls ``main()``, and its three locals all shared one mangled name.
    """
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , guess )


if __name__ == "__main__":
    main()
| 614 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase_ ( unittest.TestCase ):
    """Launch accelerate's multi-GPU smoke-test scripts via ``torchrun``.

    NOTE(review): all four methods below share the name ``lowerCAmelCase_`` so
    only the last definition survives at class creation; they were presumably
    ``setUp`` plus three distinct ``test_*`` methods -- confirm against the
    upstream accelerate test suite.
    """

    def lowerCAmelCase_ ( self : Any ):
        """Resolve the helper scripts that ship next to ``accelerate.test_utils``."""
        # Fix: the computed paths were assigned to throwaway locals although
        # the other methods read ``self.test_file_path`` & co., and the later
        # splits referenced the undefined local ``mod_file``.
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : List[Any] ):
        """Run the basic multi-GPU test script on every visible device."""
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        # Fix: the command list (not the class object) must be executed.
        cmd = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : Optional[Any] ):
        """Run the distributed-ops test script."""
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : str ):
        """Re-run this very file under torchrun (its __main__ block checks padding)."""
        cmd = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : int ):
        """Run the data-loop test script restricted to two devices."""
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    # Executed under torchrun by the test above: verify pad_across_processes.
    # Fix: the mangled original assigned every value to ``lowerCamelCase_``
    # while the checks referenced ``tensor``/``tensora``/``error_msg``/``index``;
    # the assignment targets are restored from those references.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 418 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (args ) -> Any:
    """Build the pipeline and data reader described by *args* and wrap them in a RunCommand.

    Fix: the pipeline was assigned to a throwaway local while the reader
    construction read the undefined name ``nlp``.
    """
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    data_format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=data_format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class lowercase_ ( BaseTransformersCLICommand ):
    """CLI ``run`` command: stream a data file through a pipeline and save the outputs.

    NOTE(review): the static parser-registration method and the instance run
    method share the name ``UpperCamelCase`` (presumably ``register_subcommand``
    and ``run`` upstream), so only the second survives -- confirm before use.
    Fix: the base class ``__snake_case`` was undefined; ``BaseTransformersCLICommand``
    is the imported base.
    """

    def __init__( self , nlp , reader ):
        # Fix: the two parameters shared one name (a SyntaxError) and were
        # bound to throwaway locals; ``run`` reads ``self._nlp``/``self._reader``.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def UpperCamelCase ( parser ):
        """Register the ``run`` sub-command and its arguments on *parser*."""
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        # Fix: default to the factory above (the original used the class itself).
        run_parser.set_defaults(func=snake_case )

    def UpperCamelCase ( self ):
        """Iterate the reader, run each entry through the pipeline, then persist the results."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: map submodules to their public names.
# Fix: the dict, the torch-only list, and the _LazyModule result were all
# assigned to ``_lowerCAmelCase`` (clobbering each other) and the final call
# referenced the undefined name ``_import_structure``.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''Input value must be an \'int\' type''' )
_UpperCamelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
'''simple docstring'''
def binomial_coefficient(n: int , k: int ):
    """Return C(n, k) using the multiplicative formula.

    Fix: all four functions in this module were named ``__a`` (this one with
    two identically named parameters -- a SyntaxError) while the call sites
    use the names restored here.
    """
    result = 1  # running value of C(n, k)
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int ):
    """Return the ``node_count``-th Catalan number, i.e. the number of binary search trees."""
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)


def factorial(n: int ):
    """Return n! for non-negative n; raise ValueError otherwise."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result


def binary_tree_count(node_count: int ):
    """Return the number of (labeled) binary trees with ``node_count`` nodes."""
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
    # Fix: the input was assigned to ``_SCREAMING_SNAKE_CASE`` while the rest
    # of the block read the undefined name ``node_count``.
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
        f'''binary trees and {catalan_number(node_count)} binary search trees.'''
    )
| 18 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds YOLOS configs and dummy inputs for the model tests below.

    NOTE(review): this source appears machine-mangled -- every ``__init__``
    parameter is named ``snake_case__`` (duplicate parameter names are a
    SyntaxError), all five methods share the name ``lowercase__`` (only the
    last definition would survive), and the bodies reference names such as
    ``parent``/``batch_size`` that the signatures never bind.  The test class
    below also calls ``YolosModelTester`` and ``prepare_config_and_inputs``,
    which do not exist under these names.  Confirm against the upstream
    ``tests/models/yolos`` file before relying on this code.
    """

    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=[30, 30] , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=8 , snake_case__=10 , ):
        """Record the hyper-parameters used to build configs and inputs.

        NOTE(review): the bodies below read names (``parent``, ``batch_size``,
        ...) that the mangled signature does not define -- presumably the
        original parameter names; confirm.
        """
        lowerCAmelCase : Any = parent
        lowerCAmelCase : Optional[Any] = batch_size
        lowerCAmelCase : Optional[Any] = image_size
        lowerCAmelCase : Tuple = patch_size
        lowerCAmelCase : Optional[int] = num_channels
        lowerCAmelCase : List[str] = is_training
        lowerCAmelCase : Optional[int] = use_labels
        lowerCAmelCase : Optional[Any] = hidden_size
        lowerCAmelCase : str = num_hidden_layers
        lowerCAmelCase : Any = num_attention_heads
        lowerCAmelCase : Tuple = intermediate_size
        lowerCAmelCase : Union[str, Any] = hidden_act
        lowerCAmelCase : List[str] = hidden_dropout_prob
        lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
        lowerCAmelCase : int = type_sequence_label_size
        lowerCAmelCase : Dict = initializer_range
        lowerCAmelCase : List[str] = num_labels
        lowerCAmelCase : List[str] = scope
        lowerCAmelCase : Dict = n_targets
        lowerCAmelCase : Optional[int] = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        lowerCAmelCase : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        lowerCAmelCase : Optional[int] = num_patches + 1 + self.num_detection_tokens

    def lowercase__ ( self ):
        """Build random pixel values and (optionally) per-example detection labels."""
        lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        lowerCAmelCase : List[Any] = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            lowerCAmelCase : Dict = []
            for i in range(self.batch_size ):
                lowerCAmelCase : Any = {}
                lowerCAmelCase : Optional[int] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=snake_case__ )
                lowerCAmelCase : Optional[Any] = torch.rand(self.n_targets , 4 , device=snake_case__ )
                labels.append(snake_case__ )
        lowerCAmelCase : Tuple = self.get_config()
        return config, pixel_values, labels

    def lowercase__ ( self ):
        """Build a YolosConfig from the recorded hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )

    def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
        """Instantiate YolosModel and check the last-hidden-state shape."""
        lowerCAmelCase : Any = YolosModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        lowerCAmelCase : Dict = model(snake_case__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )

    def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
        """Instantiate YolosForObjectDetection and check logits/boxes/loss shapes."""
        lowerCAmelCase : Any = YolosForObjectDetection(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        lowerCAmelCase : Optional[int] = model(pixel_values=snake_case__ )
        lowerCAmelCase : Tuple = model(snake_case__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        lowerCAmelCase : Any = model(pixel_values=snake_case__ , labels=snake_case__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )

    def lowercase__ ( self ):
        """Return ``(config, {"pixel_values": ...})`` for the common mixin tests."""
        lowerCAmelCase : str = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = config_and_inputs
        lowerCAmelCase : Tuple = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
    """Model tests for YOLOS (YolosModel / YolosForObjectDetection).

    NOTE(review): machine-mangled source -- the two mixin bases are the
    undefined name ``lowercase`` (presumably ModelTesterMixin and
    PipelineTesterMixin), the six class attributes all share the name ``a``
    (so only the last survives), several method signatures repeat the
    parameter name ``snake_case__`` (a SyntaxError), and all methods share
    the name ``lowercase__``.  Confirm against the upstream test file.
    """

    a : Union[str, Any] =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    a : Optional[Any] =(
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    a : List[str] =False
    a : Dict =False
    a : Any =False
    a : Tuple =False

    def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
        """Prepare inputs for a model class, adding detection labels when requested."""
        lowerCAmelCase : Any = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                lowerCAmelCase : Union[str, Any] = []
                for i in range(self.model_tester.batch_size ):
                    lowerCAmelCase : Tuple = {}
                    lowerCAmelCase : int = torch.ones(
                        size=(self.model_tester.n_targets,) , device=snake_case__ , dtype=torch.long )
                    lowerCAmelCase : Tuple = torch.ones(
                        self.model_tester.n_targets , 4 , device=snake_case__ , dtype=torch.float )
                    labels.append(snake_case__ )
                lowerCAmelCase : Optional[Any] = labels

        return inputs_dict

    def lowercase__ ( self ):
        """Set up the model tester and the config tester."""
        lowerCAmelCase : int = YolosModelTester(self )
        lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )

    def lowercase__ ( self ):
        """Run the standard config sanity checks."""
        self.config_tester.run_common_tests()

    def lowercase__ ( self ):
        """Intentionally skipped (no input embeddings test for YOLOS)."""
        pass

    def lowercase__ ( self ):
        """Check input/output embedding accessors on every model class."""
        lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase : Any = model_class(snake_case__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )

    def lowercase__ ( self ):
        """Check that forward()'s first argument is ``pixel_values``."""
        lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
            lowerCAmelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]

            lowerCAmelCase : Optional[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case__ )

    def lowercase__ ( self ):
        """Run the basic model shape check."""
        lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )

    def lowercase__ ( self ):
        """Check attention outputs: count, per-layer shape, and position in outputs."""
        lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase : Any = True

        # in YOLOS, the seq_len is different
        lowerCAmelCase : Union[str, Any] = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            lowerCAmelCase : Dict = True
            lowerCAmelCase : Optional[Any] = False
            lowerCAmelCase : Optional[Any] = True
            lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            lowerCAmelCase : Optional[int] = outputs.attentions
            self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase : List[str] = True
            lowerCAmelCase : Dict = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            lowerCAmelCase : Dict = outputs.attentions
            self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            lowerCAmelCase : List[Any] = len(snake_case__ )

            # Check attention is always last and order is fine
            lowerCAmelCase : str = True
            lowerCAmelCase : Any = True
            lowerCAmelCase : Dict = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )

            lowerCAmelCase : Optional[int] = 1
            self.assertEqual(out_len + added_hidden_states , len(snake_case__ ) )

            lowerCAmelCase : List[Any] = outputs.attentions

            self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def lowercase__ ( self ):
        """Check hidden-state outputs: layer count and per-layer shape."""

        def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            # Run one model and verify its hidden_states list.
            lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()

            with torch.no_grad():
                lowerCAmelCase : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )

            lowerCAmelCase : Union[str, Any] = outputs.hidden_states
            lowerCAmelCase : Tuple = getattr(
                self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(snake_case__ ) , snake_case__ )

            # YOLOS has a different seq_length
            lowerCAmelCase : Optional[Any] = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase : str = True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase : Optional[int] = True

            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )

    def lowercase__ ( self ):
        """Run the object-detection head shape checks."""
        lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*snake_case__ )

    @slow
    def lowercase__ ( self ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase : str = YolosModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(snake_case__ )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : List[Any] = prepare_img()
lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCAmelCase : Optional[int] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=snake_case__ , )
lowerCAmelCase : Tuple = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 ) )
# verify postprocessing
lowerCAmelCase : List[Any] = image_processor.post_process_object_detection(
snake_case__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(snake_case__ )
lowerCAmelCase : int = [75, 75, 17, 63, 17]
lowerCAmelCase : str = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(snake_case__ )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , snake_case__ , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , snake_case__ )
self.assertTrue(torch.allclose(results["boxes"][0, :] , snake_case__ ) )
| 645 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = KandinskyVaaInpaintPipeline
UpperCAmelCase_ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase_ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase_ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase_ = False
@property
def snake_case_ (self ) -> Optional[int]:
return 32
@property
def snake_case_ (self ) -> int:
return 32
@property
def snake_case_ (self ) -> List[str]:
return self.time_input_dim
@property
def snake_case_ (self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def snake_case_ (self ) -> List[str]:
return 1_00
@property
def snake_case_ (self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCamelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase = UNetaDConditionModel(**__a )
return model
@property
def snake_case_ (self ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ (self ) -> Any:
torch.manual_seed(0 )
UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ (self ) -> List[str]:
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type="epsilon" , thresholding=__a , )
UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ (self , __a , __a=0 ) -> int:
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create init_image
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
UpperCamelCase = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase = 0
if str(__a ).startswith("mps" ):
UpperCamelCase = torch.manual_seed(__a )
else:
UpperCamelCase = torch.Generator(device=__a ).manual_seed(__a )
UpperCamelCase = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def snake_case_ (self ) -> Dict:
UpperCamelCase = "cpu"
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**__a )
UpperCamelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = pipe(**self.get_dummy_inputs(__a ) )
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def snake_case_ (self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase = np.ones((7_68, 7_68) , dtype=np.floataa )
UpperCamelCase = 0
UpperCamelCase = "a hat"
UpperCamelCase = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__a )
UpperCamelCase = KandinskyVaaInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase = pipeline(
image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
UpperCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__a , __a )
| 544 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCamelCase :
pass
| 544 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : List[str] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ['CLIPFeatureExtractor']
UpperCAmelCase_ : List[Any] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': 5_1_2,
'google/realm-cc-news-pretrained-encoder': 5_1_2,
'google/realm-cc-news-pretrained-scorer': 5_1_2,
'google/realm-cc-news-pretrained-openqa': 5_1_2,
'google/realm-orqa-nq-openqa': 5_1_2,
'google/realm-orqa-nq-reader': 5_1_2,
'google/realm-orqa-wq-openqa': 5_1_2,
'google/realm-orqa-wq-reader': 5_1_2,
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = RealmTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=True , __lowercase="[UNK]" , __lowercase="[SEP]" , __lowercase="[PAD]" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase=True , __lowercase=None , **__lowercase , ):
"""simple docstring"""
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars
):
__A : Tuple = getattr(__lowercase , normalizer_state.pop('type' ) )
__A : Optional[int] = do_lower_case
__A : List[str] = strip_accents
__A : Dict = tokenize_chinese_chars
__A : List[Any] = normalizer_class(**__lowercase )
__A : int = do_lower_case
def snake_case__ ( self , __lowercase , **__lowercase ):
"""simple docstring"""
__A : Dict = PaddingStrategy.MAX_LENGTH
__A : Optional[int] = text
__A : Union[str, Any] = kwargs.pop('text_pair' , __lowercase )
__A : List[str] = kwargs.pop('return_tensors' , __lowercase )
__A : int = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
__A : Tuple = batch_text_pair[idx]
else:
__A : Union[str, Any] = None
__A : Optional[int] = super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase )
__A : str = encoded_candidates.get('input_ids' )
__A : Union[str, Any] = encoded_candidates.get('attention_mask' )
__A : Optional[int] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
__A : List[str] = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase , tensor_type=__lowercase )
def snake_case__ ( self , __lowercase , __lowercase=None ):
"""simple docstring"""
__A : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : Union[str, Any] = [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : int = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
| 365 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( UpperCAmelCase__ ):
__lowerCamelCase = """luke"""
def __init__( self : Tuple , __a : Dict=50267 , __a : List[Any]=500000 , __a : Optional[Any]=768 , __a : Union[str, Any]=256 , __a : str=12 , __a : Dict=12 , __a : List[str]=3072 , __a : Union[str, Any]="gelu" , __a : Optional[int]=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=512 , __a : str=2 , __a : Any=0.02 , __a : str=1e-12 , __a : Optional[int]=True , __a : List[str]=None , __a : Any=1 , __a : Optional[int]=0 , __a : Dict=2 , **__a : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__: List[Any] = vocab_size
lowerCamelCase__: Union[str, Any] = entity_vocab_size
lowerCamelCase__: Any = hidden_size
lowerCamelCase__: List[Any] = entity_emb_size
lowerCamelCase__: Union[str, Any] = num_hidden_layers
lowerCamelCase__: str = num_attention_heads
lowerCamelCase__: str = hidden_act
lowerCamelCase__: Any = intermediate_size
lowerCamelCase__: List[str] = hidden_dropout_prob
lowerCamelCase__: Any = attention_probs_dropout_prob
lowerCamelCase__: List[str] = max_position_embeddings
lowerCamelCase__: str = type_vocab_size
lowerCamelCase__: Union[str, Any] = initializer_range
lowerCamelCase__: Tuple = layer_norm_eps
lowerCamelCase__: List[str] = use_entity_aware_attention
lowerCamelCase__: str = classifier_dropout
| 710 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = """big_bird"""
def __init__( self : Any , __a : Dict=50358 , __a : Union[str, Any]=768 , __a : Dict=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[Any]="gelu_new" , __a : str=0.1 , __a : int=0.1 , __a : Tuple=4096 , __a : Any=2 , __a : Tuple=0.02 , __a : Optional[Any]=1e-12 , __a : Optional[int]=True , __a : Union[str, Any]=0 , __a : int=1 , __a : List[str]=2 , __a : Any=66 , __a : Union[str, Any]="block_sparse" , __a : Dict=True , __a : Any=False , __a : List[str]=64 , __a : Union[str, Any]=3 , __a : Optional[int]=None , **__a : Any , ):
'''simple docstring'''
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
lowerCamelCase__: int = vocab_size
lowerCamelCase__: List[str] = max_position_embeddings
lowerCamelCase__: List[str] = hidden_size
lowerCamelCase__: str = num_hidden_layers
lowerCamelCase__: Dict = num_attention_heads
lowerCamelCase__: Optional[int] = intermediate_size
lowerCamelCase__: List[str] = hidden_act
lowerCamelCase__: Any = hidden_dropout_prob
lowerCamelCase__: Any = attention_probs_dropout_prob
lowerCamelCase__: Optional[Any] = initializer_range
lowerCamelCase__: List[str] = type_vocab_size
lowerCamelCase__: List[Any] = layer_norm_eps
lowerCamelCase__: Union[str, Any] = use_cache
lowerCamelCase__: int = rescale_embeddings
lowerCamelCase__: Any = attention_type
lowerCamelCase__: str = use_bias
lowerCamelCase__: List[str] = block_size
lowerCamelCase__: int = num_random_blocks
lowerCamelCase__: List[str] = classifier_dropout
class lowerCamelCase__ ( A__ ):
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__: List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase__: int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 242 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
_a : int = 1
_a : Union[str, Any] = 3
_a : Optional[Any] = (3_2, 3_2)
_a : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCamelCase ( self ) -> int:
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def __UpperCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_a : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCamelCase ( self ) -> int:
def extract(*lowerCamelCase_ , **lowerCamelCase_ ):
class a :
'''simple docstring'''
def __init__( self ) -> int:
_a : List[str] = torch.ones([0] )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Optional[int]:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCamelCase ( self ) -> str:
_a : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Tuple = self.dummy_cond_unet
_a : str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_a : Dict = self.dummy_vae
_a : str = self.dummy_text_encoder
_a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_a : Optional[Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_a : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a : List[str] = 'A painting of a squirrel eating a burger'
_a : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_a : Dict = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_a : List[Any] = output.images
_a : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_a : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_a : Optional[Any] = image[0, -3:, -3:, -1]
_a : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a : Tuple = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Tuple:
_a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : List[Any] = self.dummy_cond_unet
_a : str = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_a : Optional[int] = self.dummy_vae
_a : Union[str, Any] = self.dummy_text_encoder
_a : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_a : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_a : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a : int = 'A painting of a squirrel eating a burger'
_a : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_a : Dict = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_a : Optional[Any] = output.images
_a : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_a : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_a : Optional[int] = image[0, -3:, -3:, -1]
_a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a : List[str] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Tuple:
_a : int = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_a : Optional[Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_a : List[str] = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a : Any = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCamelCase ( self ) -> List[Any]:
    """fp16 smoke test: half-precision unet/vae/text-encoder still produce a 64x64 image.

    NOTE(review): ``lowerCamelCase_`` placeholders (mangling) likely stood for the
    local models / ``torch_device`` / ``None``; assignment targets were mangled to
    ``_a`` while later lines read ``unet``/``vae``/``bert``/``sd_pipe``/``image``.
    Code left byte-identical; comments only.
    """
    _a : Optional[int] = self.dummy_cond_unet
    _a : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
    _a : List[Any] = self.dummy_vae
    _a : int = self.dummy_text_encoder
    _a : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
    # put models in fp16
    _a : int = unet.half()
    _a : str = vae.half()
    _a : int = bert.half()
    # make sure here that pndm scheduler skips prk
    _a : int = StableDiffusionPipeline(
        unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
    _a : int = sd_pipe.to(lowerCamelCase_ )
    sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
    _a : Tuple = 'A painting of a squirrel eating a burger'
    _a : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
    assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    '''simple docstring'''
    # Nightly GPU tests for Safe Latent Diffusion (sld_* kwargs): each test renders
    # with sld_guidance_scale=0 (safety off) and again with a strong SLD
    # configuration, then checks a 3x3 corner slice of the output image.
    #
    # NOTE(review): source mangling replaced every assignment target with `_a` and
    # many argument values with the undefined placeholder `lowerCamelCase_`
    # (presumably `torch_device`, `None`, the seed, `guidance_scale`, ...); later
    # lines read the original names (`sd_pipe`, `output`, `image`, `image_slice`,
    # `expected_slice`), so these tests cannot run as-is. All methods also share
    # the mangled name `__UpperCamelCase`, so only the last definition survives on
    # the class. Code left byte-identical; comments only.

    def __UpperCamelCase ( self ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase ( self ) -> List[Any]:
        # SD v1-5 + LMS scheduler, long "grange clothes" prompt: SLD off vs strong SLD.
        _a : List[str] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _a : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _a : Optional[int] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _a : List[Any] = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        _a : int = 4_0_0_3_6_6_0_3_4_6
        _a : Optional[int] = 7
        # without safety guidance (sld_guidance_scale = 0)
        _a : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        _a : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
        _a : str = output.images
        _a : List[str] = image[0, -3:, -3:, -1]
        _a : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # without safety guidance (strong configuration)
        _a : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _a : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _a : Tuple = output.images
        _a : str = image[0, -3:, -3:, -1]
        _a : Tuple = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase ( self ) -> Optional[int]:
        # "safe for work" prompt: checks SLD does not over-correct harmless content.
        _a : int = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _a : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _a : List[Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _a : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
        _a : Tuple = 2_7_3_4_9_7_1_7_5_5
        _a : str = 7
        _a : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _a : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
        _a : Optional[int] = output.images
        _a : str = image[0, -3:, -3:, -1]
        _a : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        _a : Tuple = torch.manual_seed(lowerCamelCase_ )
        _a : Optional[int] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _a : List[Any] = output.images
        _a : int = image[0, -3:, -3:, -1]
        _a : Optional[int] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase ( self ) -> List[Any]:
        # Default safety checker on: SLD-off output is fully blacked out (all zeros),
        # strong SLD yields a real image.
        _a : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        _a : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _a : Union[str, Any] = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        _a : List[str] = 1_0_4_4_3_5_5_2_3_4
        _a : Union[str, Any] = 1_2
        _a : Tuple = torch.manual_seed(lowerCamelCase_ )
        _a : List[Any] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
        _a : int = output.images
        _a : Optional[int] = image[0, -3:, -3:, -1]
        _a : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        _a : int = torch.manual_seed(lowerCamelCase_ )
        _a : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _a : Dict = output.images
        _a : str = image[0, -3:, -3:, -1]
        _a : Union[str, Any] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 120 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Materialize ("bertarize") a fine-pruned model into a plain dense checkpoint.

    Loads ``<model_name_or_path>/pytorch_model.bin``, applies the binarizer
    matching ``args.pruning_method`` to every prunable weight (embeddings,
    LayerNorm, pooler, classifier/QA heads and biases are copied untouched),
    and saves the result to ``args.target_model_path`` (default:
    ``bertarized_<model dir name>`` next to the source model).

    Args:
        args: argparse namespace with ``pruning_method`` (one of ``l0``,
            ``magnitude``, ``topK``, ``sigmoied_threshold``), ``threshold``,
            ``model_name_or_path`` and ``target_model_path``.

    Raises:
        ValueError: on an unknown ``pruning_method``.

    NOTE(review): the mangled original bound every result to ``_a`` and took a
    parameter named ``A`` while reading ``args``; names restored from the body's
    own reads and from the ``main(args)`` call in the ``__main__`` block.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Never pruned: copied verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip trailing "weight" to find the paired scores
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Third positional arg enables the sigmoid on the scores.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval from the L0 regularization paper.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        # Copy the whole model folder (config, tokenizer, ...) then overwrite the weights.
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


# Backward-compatible alias for the mangled original name.
UpperCAmelCase_ = main
if __name__ == "__main__":
    # NOTE(review): restored `parser`/`args` — the mangled original assigned the
    # parser and the parsed args to `UpperCAmelCase_` while reading `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    main(args)
| 120 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap an LDM/CompVis VAE state dict onto diffusers' ``AutoencoderKL`` layout.

    Args:
        checkpoint: original VAE state dict (``encoder.down.*`` / ``decoder.up.*`` keys).
        config: diffusers VAE config dict, as produced by ``create_vae_diffusers_config``.

    Returns:
        A new state dict keyed for ``AutoencoderKL.load_state_dict``.

    NOTE(review): the mangled original declared ``lowercase`` twice in the
    signature (a SyntaxError) and bound every value to ``A``; names restored from
    the body's own reads and the call site in ``vae_pt_to_vae_diffuser``.
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}

    # Stem / head convolutions and final norms are a straight rename.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid block: two resnets plus one attention.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # LDM numbers up blocks from the bottom; diffusers from the top.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid block, same structure as the encoder's.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, dump_path: str):
    """Convert a CompVis VAE ``.pt``/``.safetensors`` checkpoint to diffusers format.

    Downloads the reference v1 inference config, builds the matching
    ``AutoencoderKL`` config, remaps the weights and writes the converted model
    to ``dump_path`` via ``save_pretrained``.

    NOTE(review): the mangled original declared ``lowercase`` twice (SyntaxError)
    and bound every value to ``A``; names restored from the call site
    (``vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)``). Also dropped
    the stray leading space from the config URL.
    """
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(dump_path)
if __name__ == "__main__":
    # NOTE(review): restored `parser`/`args` — the mangled original assigned both
    # to `_lowercase` while reading `parser`/`args`.
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 661 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds image-processor settings and computes expected output sizes for the tests below.

    NOTE(review): renamed from the mangled ``SCREAMING_SNAKE_CASE_`` so the
    instantiation ``ConditionalDetrImageProcessingTester(self)`` in the test
    class resolves; duplicate ``SCREAMING_SNAKE_CASE__`` parameters (a
    SyntaxError) restored from the attribute reads in the method bodies.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        # list defaults kept for interface compatibility; they are never mutated here
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs for constructing a ConditionalDetrImageProcessor from this tester's settings."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, every image is resized independently and the batch is padded
        to the per-axis maximum.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``ConditionalDetrImageProcessor``: properties, from_dict kwargs, and
    PIL / numpy / torch call paths, plus slow COCO detection/panoptic annotation checks.

    NOTE(review): the mangled original gave every method the same name (so only
    one survived), duplicated parameter names (a SyntaxError) and left the mixin
    base as an undefined placeholder; names restored from the call sites and the
    file's imports.
    """

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 661 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): restored `git_repo_path` — the mangled original bound the path to
# `lowerCamelCase_` while the next line reads `git_repo_path`.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the custom markers used across the test suite.

    NOTE(review): renamed from the mangled ``__lowerCamelCase`` (parameter
    ``a_``) so pytest's pluggy layer can discover and call this hook; the body
    reads ``config``, which pins the original parameter name.
    """
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    """Forward CLI option registration to the shared transformers helper.

    NOTE(review): renamed from the mangled ``__lowerCamelCase``/``a_`` so pytest
    can discover the hook and pass the ``parser`` argument by name.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Write the extended test reports when ``--make-reports`` is given.

    NOTE(review): renamed from the mangled ``__lowerCamelCase``/``a_``; the body
    reads ``terminalreporter`` and ``make_reports``, which pin the original names.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session, exitstatus):
    """Treat an empty test collection as success.

    NOTE(review): renamed from the mangled ``__lowerCamelCase``; the original
    assignment target was mangled to a dead local — restoring
    ``session.exitstatus`` per the comment below (to be confirmed against the
    upstream conftest).
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCamelCase_ = doctest.register_optionflag("IGNORE_RESULT")
lowerCamelCase_ = doctest.OutputChecker
class CustomOutputChecker(doctest.OutputChecker):
    """Doctest output checker that honors the custom ``IGNORE_RESULT`` option flag.

    NOTE(review): renamed from the mangled ``_SCREAMING_SNAKE_CASE`` (the
    assignment below the class reads ``CustomOutputChecker``); the method must be
    named ``check_output`` for doctest to call it, and the mangled signature
    duplicated its parameter names (a SyntaxError).
    """

    # register_optionflag has setdefault semantics, so re-registering returns the
    # existing flag value — this stays consistent with a module-level IGNORE_RESULT.
    IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

    def check_output(self, want, got, optionflags):
        # Accept any output when the IGNORE_RESULT flag is set on the example.
        if self.IGNORE_RESULT & optionflags:
            return True
        return super().check_output(want, got, optionflags)
# Route doctest collection through the HF-specific checker/module/parser so the
# IGNORE_RESULT flag and custom doctest parsing take effect globally.
# NOTE(review): the mangled original assigned all three to the dead name
# `lowerCamelCase_`; targets restored (to be confirmed against the upstream
# transformers conftest).
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# NOTE(review): restored the standard transformers lazy-init structure — the
# mangled original bound everything to `a_`, leaving `_import_structure`
# undefined and the `_LazyModule` instance assigned to a dead name instead of
# `sys.modules[__name__]`.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

# Only expose the modeling objects when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 478 | 0 |
def A__ ( snake_case_ : str ) -> str:
    """Return the longest palindromic substring of ``snake_case_``.

    Uses Manacher's algorithm on a '|'-interleaved copy of the input (O(n)).
    On ties the leftmost-centered palindrome wins.

    Fix: empty input previously raised IndexError on ``input_string[-1]``;
    it now returns "".
    """
    if not snake_case_:
        return ""
    max_length = 0
    output_string = ""
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = "|".join(snake_case_ )
    # l, r: center indices bounding the furthest-right palindrome found so far.
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for _ in range(len(new_input_string ) )]
    start = 0
    # for each character in new_string find corresponding palindromic string
    for j in range(len(new_input_string ) ):
        # Mirror trick: reuse the mirrored center's result while inside [l, r].
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 107 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for AltDiffusionPipeline with tiny dummy components.

    Fixes: the listing declared the same base class three times (TypeError:
    duplicate base class), the same class attribute name five times, and the
    same method name for every method (shadowing all but the last); the
    canonical names are restored — base/mixin names are grounded by this
    file's imports, `get_dummy_components` / `get_dummy_inputs` by the
    in-class `self.` calls, and the two `super().test_*` overrides by the
    methods they delegate to.  NOTE(review): confirm mixin order and the two
    remaining test-method names against the upstream test file.
    """

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self) -> Optional[Any]:
        """Build a minimal, seeded component set for fast deterministic runs."""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        # NOTE(review): the listing only shows a bare "= 77"; restoring the usual
        # max-length clamp — confirm against upstream.
        tokenizer.model_max_length = 77
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self , device , seed=0 ) -> List[Any]:
        """Deterministic pipeline kwargs for *device* (fixes the duplicated parameter name)."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self) -> List[str]:
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def test_inference_batch_single_identical(self) -> Tuple:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def test_alt_diffusion_ddim(self) -> Optional[Any]:
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['prompt'] = 'A photo of an astronaut'
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_alt_diffusion_pndm(self) -> Optional[int]:
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration tests against the published BAAI/AltDiffusion weights.

    Fixes: all three methods shared one name in the listing, so only the last
    survived; `tearDown` is grounded by its own `super().tearDown()` call.
    NOTE(review): the two test-method names are restored by convention —
    confirm against the upstream test file.
    """

    def tearDown(self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self) -> Optional[int]:
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self) -> Union[str, Any]:
        scheduler = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='numpy' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 107 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE_ ):
    """Descriptor mimicking @property but computing the value once per instance.

    The value is cached on the instance under ``__cached_<name>``.
    Fixes: ``__get__``'s two non-self parameters shared one name (a
    SyntaxError) and the body referenced the undefined ``obj``.
    """

    def __get__( self , obj , objtype=None ) -> Optional[int]:
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute' )
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def a_ ( _A ) -> Tuple:
"""simple docstring"""
snake_case__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a_ ( _A ) -> str:
    """True iff *_A* is a tensor from torch / TF / JAX (incl. fx proxies and tracers) or a numpy array.

    Fixes: the body referenced the undefined ``_a`` instead of the parameter ``_A``.
    """
    if is_torch_fx_proxy(_A ):
        return True
    if is_torch_available():
        import torch

        if isinstance(_A , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(_A , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(_A , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(_A , np.ndarray )
def a_ ( _A ) -> Tuple:
"""simple docstring"""
return isinstance(_a , np.ndarray )
def a_ ( _A ) -> str:
    """Public numpy-array predicate; delegates to the private helper.

    Fixes the undefined ``_a`` reference.  NOTE(review): ``_is_numpy`` must be
    the module-level binding of the helper above — confirm it resolves.
    """
    return _is_numpy(_A )
def a_ ( _A ) -> Any:
"""simple docstring"""
import torch
return isinstance(_a , torch.Tensor )
def a_ ( _A ) -> Any:
    """Torch-tensor predicate that is safe when torch is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_torch`` helper name resolving — confirm.
    """
    return False if not is_torch_available() else _is_torch(_A )
def a_ ( _A ) -> str:
    """True iff *_A* is a torch.device (fixes the undefined ``_a`` reference)."""
    import torch

    return isinstance(_A , torch.device )
def a_ ( _A ) -> Optional[int]:
    """Torch-device predicate that is safe when torch is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_torch_device`` helper name resolving — confirm.
    """
    return False if not is_torch_available() else _is_torch_device(_A )
def a_ ( _A ) -> Dict:
"""simple docstring"""
import torch
if isinstance(_a , _a ):
if hasattr(_a , _a ):
snake_case__ = getattr(_a , _a )
else:
return False
return isinstance(_a , torch.dtype )
def a_ ( _A ) -> Dict:
    """Torch-dtype predicate that is safe when torch is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_torch_dtype`` helper name resolving — confirm.
    """
    return False if not is_torch_available() else _is_torch_dtype(_A )
def a_ ( _A ) -> Optional[Any]:
    """True iff *_A* is a tf.Tensor (fixes the undefined ``_a`` reference)."""
    import tensorflow as tf

    return isinstance(_A , tf.Tensor )
def a_ ( _A ) -> Union[str, Any]:
    """TF-tensor predicate that is safe when tensorflow is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_tensorflow`` helper name resolving — confirm.
    """
    return False if not is_tf_available() else _is_tensorflow(_A )
def a_ ( _A ) -> Dict:
    """True iff *_A* is a symbolic TF tensor (fixes the undefined ``_a`` reference)."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , 'is_symbolic_tensor' ):
        return tf.is_symbolic_tensor(_A )
    return type(_A ) == tf.Tensor
def a_ ( _A ) -> List[Any]:
    """Symbolic-TF-tensor predicate that is safe when tensorflow is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_tf_symbolic_tensor`` helper name resolving — confirm.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_A )
def a_ ( _A ) -> Optional[Any]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(_a , jnp.ndarray )
def a_ ( _A ) -> int:
    """JAX-array predicate that is safe when flax/jax is not installed.

    Fixes the undefined ``_a`` reference.  NOTE(review): relies on the
    module-level ``_is_jax`` helper name resolving — confirm.
    """
    return False if not is_flax_available() else _is_jax(_A )
def a_ ( _A ) -> List[str]:
    """Recursively convert tensors/arrays inside *_A* to plain Python lists/scalars.

    Fixes: the body referenced undefined ``_a``/``obj`` and the dict branch
    recursed on the wrong value.  NOTE(review): recursion goes through the
    module-level name ``a_``, which later definitions in this file rebind —
    restore unique function names to make file-level recursion sound.
    """
    if isinstance(_A , (dict, UserDict) ):
        return {k: a_(v ) for k, v in _A.items()}
    elif isinstance(_A , (list, tuple) ):
        return [a_(o ) for o in _A]
    elif is_tf_tensor(_A ):
        return _A.numpy().tolist()
    elif is_torch_tensor(_A ):
        return _A.detach().cpu().tolist()
    elif is_jax_tensor(_A ):
        return np.asarray(_A ).tolist()
    elif isinstance(_A , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return _A.tolist()
    else:
        return _A
def a_ ( _A ) -> Optional[int]:
"""simple docstring"""
if isinstance(_a , (dict, UserDict) ):
return {k: to_numpy(_a ) for k, v in obj.items()}
elif isinstance(_a , (list, tuple) ):
return np.array(_a )
elif is_tf_tensor(_a ):
return obj.numpy()
elif is_torch_tensor(_a ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_a ):
return np.asarray(_a )
else:
return obj
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE_ ):
    """Ordered-dict model output: fields are reachable by attribute, key, or index.

    Fixes: several methods shared one name (shadowing all but the last),
    multiple signatures repeated a parameter name (SyntaxError), and the
    ``self[...]``/`setattr` writes were lost to throwaway locals.  Method
    names are restored from the evidence in the bodies (the error strings
    name ``__delitem__``/``setdefault``/``pop``/``update``; ``__getitem__``
    calls ``self.to_tuple()``).  NOTE(review): ``__post_init__`` assumes
    subclasses are dataclasses — confirm against upstream usage.
    """

    def __post_init__( self ) -> Dict:
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v

    def __delitem__( self , *args , **kwargs ) -> int:
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )

    def setdefault( self , *args , **kwargs ) -> str:
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )

    def pop( self , *args , **kwargs ) -> Dict:
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )

    def update( self , *args , **kwargs ) -> int:
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )

    def __getitem__( self , k ) -> List[Any]:
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            # Integer / slice access goes through the tuple view.
            return self.to_tuple()[k]

    def __setattr__( self , name , value ) -> Union[str, Any]:
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )

    def __setitem__( self , key , value ) -> List[str]:
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )

    def to_tuple( self ) -> Tuple:
        """Return the non-None field values as a plain tuple."""
        return tuple(self[k] for k in self.keys() )
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@classmethod
def lowerCAmelCase_ ( cls: Any , UpperCamelCase: Tuple ) -> str:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE_ ):
    """Padding strategies accepted by tokenizer calls.

    Fixes: all three members shared the name ``_UpperCAmelCase``, which Enum
    rejects at class creation (key reuse); canonical member names restored.
    NOTE(review): confirm names against the upstream PaddingStrategy enum.
    """

    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE_ ):
    """Tensor framework identifiers used for return-tensor selection.

    Fixes: all four members shared the name ``_UpperCAmelCase``, which Enum
    rejects at class creation (key reuse); canonical member names restored.
    NOTE(review): confirm names against the upstream TensorType enum.
    """

    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class __SCREAMING_SNAKE_CASE:
def __init__( self: Any , UpperCamelCase: List[ContextManager] ) -> List[Any]:
snake_case__ = context_managers
snake_case__ = ExitStack()
def __enter__( self: str ) -> Optional[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(SCREAMING_SNAKE_CASE__ )
def __exit__( self: List[str] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: Tuple ) -> Any:
self.stack.__exit__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a_ ( _A ) -> str:
    """True when model class *_A*'s forward/call signature has ``return_loss`` defaulting to True.

    Fixes: the body referenced undefined ``_a``/``model_class`` instead of the
    parameter.  NOTE(review): ``infer_framework`` must resolve at module level
    — in this file the helper is also named ``a_``; confirm.
    """
    framework = infer_framework(_A )
    if framework == "tf":
        signature = inspect.signature(_A.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_A.forward )  # PyTorch models
    else:
        signature = inspect.signature(_A.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def a_ ( _A ) -> Union[str, Any]:
    """List the label-like parameter names in model class *_A*'s signature.

    Fixes: the body referenced undefined ``_a``/``model_class`` instead of the
    parameter.  NOTE(review): ``infer_framework`` must resolve at module level
    — in this file the helper is also named ``a_``; confirm.
    """
    model_name = _A.__name__
    framework = infer_framework(_A )
    if framework == "tf":
        signature = inspect.signature(_A.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(_A.forward )  # PyTorch models
    else:
        signature = inspect.signature(_A.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        # QA heads label spans rather than classes.
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a_ ( _A , _A = "" , _A = "." ) -> List[Any]:
"""simple docstring"""
def _flatten_dict(_A , _A="" , _A="." ):
for k, v in d.items():
snake_case__ = str(_a ) + delimiter + str(_a ) if parent_key else k
if v and isinstance(_a , _a ):
yield from flatten_dict(_a , _a , delimiter=_a ).items()
else:
yield key, v
return dict(_flatten_dict(_a , _a , _a ) )
@contextmanager
def a_ ( _A , use_temp_dir = False ) -> Dict:
    """Yield a fresh temporary directory when *use_temp_dir*, else the given working dir *_A*.

    Fixes: both parameters were named ``_A`` (a SyntaxError); the flag name is
    restored from the body's own reference.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield _A
def a_ ( _A , axes=None ) -> Optional[int]:
    """Framework-agnostic transpose of *_A* (numpy / torch / TF / JAX).

    Fixes: both parameters were named ``_A`` (a SyntaxError) and the body
    referenced undefined ``_a``/``array``; names restored from the keyword
    arguments already present in the body (``axes=``, ``perm=``).
    """
    if is_numpy_array(_A ):
        return np.transpose(_A , axes=axes )
    elif is_torch_tensor(_A ):
        return _A.T if axes is None else _A.permute(*axes )
    elif is_tf_tensor(_A ):
        import tensorflow as tf

        return tf.transpose(_A , perm=axes )
    elif is_jax_tensor(_A ):
        return jnp.transpose(_A , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(_A )}.''' )
def a_ ( _A , newshape ) -> Tuple:
    """Framework-agnostic reshape of *_A* to *newshape* (numpy / torch / TF / JAX).

    Fixes: both parameters were named ``_A`` (a SyntaxError) and the body
    referenced the undefined ``_a``/``array``.
    """
    if is_numpy_array(_A ):
        return np.reshape(_A , newshape )
    elif is_torch_tensor(_A ):
        return _A.reshape(*newshape )
    elif is_tf_tensor(_A ):
        import tensorflow as tf

        return tf.reshape(_A , newshape )
    elif is_jax_tensor(_A ):
        return jnp.reshape(_A , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(_A )}.''' )
def a_ ( _A , axis=None ) -> List[str]:
    """Framework-agnostic squeeze of *_A* (numpy / torch / TF / JAX).

    Fixes: both parameters were named ``_A`` (a SyntaxError) and the body
    referenced the undefined ``_a``/``array``; the axis name is restored from
    the ``axis=``/``dim=`` keywords already in the body.
    """
    if is_numpy_array(_A ):
        return np.squeeze(_A , axis=axis )
    elif is_torch_tensor(_A ):
        return _A.squeeze() if axis is None else _A.squeeze(dim=axis )
    elif is_tf_tensor(_A ):
        import tensorflow as tf

        return tf.squeeze(_A , axis=axis )
    elif is_jax_tensor(_A ):
        return jnp.squeeze(_A , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(_A )}.''' )
def a_ ( _A , axis ) -> str:
    """Framework-agnostic expand_dims of *_A* at *axis* (numpy / torch / TF / JAX).

    Fixes: both parameters were named ``_A`` (a SyntaxError) and the body
    referenced the undefined ``_a``/``array``.
    """
    if is_numpy_array(_A ):
        return np.expand_dims(_A , axis )
    elif is_torch_tensor(_A ):
        return _A.unsqueeze(dim=axis )
    elif is_tf_tensor(_A ):
        import tensorflow as tf

        return tf.expand_dims(_A , axis=axis )
    elif is_jax_tensor(_A ):
        return jnp.expand_dims(_A , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(_A )}.''' )
def a_ ( _A ) -> Optional[int]:
    """Framework-agnostic element count of *_A* (numpy / torch / TF / JAX).

    Fixes: the body referenced the undefined ``_a``/``array``; the error
    message wrongly said "expand_dims" (copy-paste) and now names this
    operation.
    """
    if is_numpy_array(_A ):
        return np.size(_A )
    elif is_torch_tensor(_A ):
        return _A.numel()
    elif is_tf_tensor(_A ):
        import tensorflow as tf

        return tf.size(_A )
    elif is_jax_tensor(_A ):
        return _A.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(_A )}.''' )
def a_ ( _A , repo_id ) -> str:
    """Prefix every entry of auto-map dict *_A* with ``<repo_id>--`` (in place) and return it.

    Fixes: both parameters were named ``_A`` (a SyntaxError) and the rewritten
    values were assigned to throwaway locals instead of back into the dict.
    """
    for key, value in _A.items():
        if isinstance(value , (tuple, list) ):
            _A[key] = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            _A[key] = f'''{repo_id}--{value}'''
    return _A
def a_ ( _A ) -> Optional[int]:
"""simple docstring"""
for base_class in inspect.getmro(_a ):
snake_case__ = base_class.__module__
snake_case__ = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 328 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import map: submodule name -> public symbols it exposes.
# Fixes: `_import_structure` was never defined (NameError at the _LazyModule
# call below — each optional block overwrote `_snake_case` instead of adding
# a submodule entry) and the lazy proxy was never installed into sys.modules.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
_snake_case = _import_structure  # original module-level alias kept

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ["EfficientFormerImageProcessor"]
    _import_structure["image_processing_efficientformer"] = _snake_case

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
    _import_structure["modeling_efficientformer"] = _snake_case

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
    _import_structure["modeling_tf_efficientformer"] = _snake_case

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _snake_case
| 510 | 0 |
from collections.abc import Callable
import numpy as np
def __UpperCamelCase ( _A : Callable , _A : float , _A : float , _A : float , _A : float ) ->np.ndarray:
"""simple docstring"""
lowerCamelCase_ =int(np.ceil((x_end - xa) / step_size ) )
lowerCamelCase_ =np.zeros((n + 1,) )
lowerCamelCase_ =ya
lowerCamelCase_ =xa
for k in range(_A ):
lowerCamelCase_ =y[k] + step_size * ode_func(_A , y[k] )
x += step_size
return y
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """Unit tests for transformers' activation registry and GELU variants.

    Fixes: all four test methods shared one name (so only the last was
    discovered), locals were collapsed onto one name leaving
    ``torch_builtin``/``geluaa``/``acta`` undefined, ``assertRaises`` was
    handed the test class instead of an exception type, and the attribute
    write ``acta.a = 1`` was lost.  NOTE(review): restored method names and
    the KeyError/AttributeError expectations follow the upstream test file —
    confirm.
    """

    def test_gelu_versions(self)-> List[str]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        # gelu_python matches torch's builtin gelu but differs from gelu_new.
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def test_gelu_10(self)-> int:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        geluaa = get_activation("""gelu_10""" )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        # gelu_10 clips at 10 and agrees with gelu below the clip point.
        clipped_mask = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )

    def test_get_activation(self)-> Dict:
        # Every registered activation resolves; unknown names raise.
        get_activation("""gelu""" )
        get_activation("""gelu_10""" )
        get_activation("""gelu_fast""" )
        get_activation("""gelu_new""" )
        get_activation("""gelu_python""" )
        get_activation("""gelu_pytorch_tanh""" )
        get_activation("""linear""" )
        get_activation("""mish""" )
        get_activation("""quick_gelu""" )
        get_activation("""relu""" )
        get_activation("""sigmoid""" )
        get_activation("""silu""" )
        get_activation("""swish""" )
        get_activation("""tanh""" )
        with self.assertRaises(KeyError ):
            get_activation("""bogus""" )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def test_activations_are_distinct_objects(self)-> Any:
        acta = get_activation("""gelu""" )
        acta.a = 1
        actb = get_activation("""gelu""" )
        # Attributes set on one returned module must not leak to another.
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = actb.a
| 75 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCAmelCase__ : int =logging.get_logger(__name__)
class __lowercase (__SCREAMING_SNAKE_CASE ):
    """Feature extractor that turns HTML strings into text nodes plus their xpaths.

    Fixes: the three helper methods shared one name (``UpperCamelCase__``)
    while the bodies call ``self.xpath_soup`` / ``self.get_three_from_single``
    / ``self.construct_xpath`` — those grounded names are restored; broken
    locals (everything referenced through ``lowerCAmelCase__``) are rebuilt
    from the bodies' own variable references.
    """

    def __init__( self , **kwargs ):
        # BeautifulSoup (bs4) is a hard requirement for HTML parsing.
        requires_backends(self , ['bs4'] )
        super().__init__(**kwargs )

    def xpath_soup( self , element ):
        """Walk up from *element* collecting tag names and 1-based sibling indices."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            # Subscript 0 means the tag is unique among its siblings.
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single( self , html_string ):
        """Extract (doc strings, per-string tag paths, per-string subscripts) from one HTML string."""
        html_code = BeautifulSoup(html_string , 'html.parser' )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subscripts = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError('Number of doc strings and xtags does not correspond' )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError('Number of doc strings and xsubs does not correspond' )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath( self , xpath_tags , xpath_subs ):
        """Join tag names and subscripts into an xpath like ``/html/body/div[2]``."""
        xpath = ''
        for tagname, subs in zip(xpath_tags , xpath_subs ):
            xpath += F'''/{tagname}'''
            if subs != 0:
                xpath += F'''[{subs}]'''
        return xpath

    def __call__( self , html_strings ):
        """Return a BatchFeature with ``nodes`` and ``xpaths`` for one or many HTML strings."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                'HTML strings must of type `str`, `List[str]` (batch of examples), '
                F'''but is of type {type(html_strings )}.''' )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
| 101 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
    """Config/input factory for Flax BigBird model tests.

    NOTE(review): this file appears machine-renamed — ``__init__`` declares
    every parameter as ``snake_case_`` (duplicate parameter names are a
    SyntaxError in Python) while the body reads the original names
    (``parent``, ``batch_size``, ...). Restore the original parameter list
    before running.
    """

    def __init__( self , snake_case_ , snake_case_=2 , snake_case_=56 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=2 , snake_case_=7 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , snake_case_="block_sparse" , snake_case_=True , snake_case_=False , snake_case_=2 , snake_case_=3 , ) -> Dict:
        # Store the test hyper-parameters on the instance for the prepare_* helpers below.
        SCREAMING_SNAKE_CASE : Tuple =parent
        SCREAMING_SNAKE_CASE : int =batch_size
        SCREAMING_SNAKE_CASE : List[str] =seq_length
        SCREAMING_SNAKE_CASE : Tuple =is_training
        SCREAMING_SNAKE_CASE : Dict =use_attention_mask
        SCREAMING_SNAKE_CASE : List[Any] =use_token_type_ids
        SCREAMING_SNAKE_CASE : str =use_labels
        SCREAMING_SNAKE_CASE : Dict =vocab_size
        SCREAMING_SNAKE_CASE : str =hidden_size
        SCREAMING_SNAKE_CASE : Dict =num_hidden_layers
        SCREAMING_SNAKE_CASE : List[str] =num_attention_heads
        SCREAMING_SNAKE_CASE : List[str] =intermediate_size
        SCREAMING_SNAKE_CASE : List[str] =hidden_act
        SCREAMING_SNAKE_CASE : Dict =hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Tuple =attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : str =max_position_embeddings
        SCREAMING_SNAKE_CASE : List[str] =type_vocab_size
        SCREAMING_SNAKE_CASE : Tuple =type_sequence_label_size
        SCREAMING_SNAKE_CASE : str =initializer_range
        SCREAMING_SNAKE_CASE : List[Any] =num_choices
        SCREAMING_SNAKE_CASE : Union[str, Any] =rescale_embeddings
        SCREAMING_SNAKE_CASE : Tuple =attention_type
        SCREAMING_SNAKE_CASE : str =use_bias
        SCREAMING_SNAKE_CASE : List[str] =block_size
        SCREAMING_SNAKE_CASE : Optional[Any] =num_random_blocks

    def __a ( self ) -> List[str]:
        # Build random input ids / masks plus a matching BigBirdConfig.
        SCREAMING_SNAKE_CASE : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE : int =None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE : Any =random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE : Tuple =None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE : Union[str, Any] =BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def __a ( self ) -> Optional[int]:
        # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
        SCREAMING_SNAKE_CASE : Union[str, Any] =self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] =config_and_inputs
        SCREAMING_SNAKE_CASE : str ={
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class _lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
    """Flax BigBird model test suite (slow variants of the common Flax tests).

    NOTE(review): machine-renaming artifacts — the mixin base
    ``UpperCamelCase__`` is undefined (presumably ``FlaxModelTesterMixin``),
    the three ``lowerCamelCase__`` class attributes rebind one name
    (originally distinct flags such as ``all_model_classes``), and
    ``FlaxBigBirdModelTester`` no longer matches the renamed tester class
    above. Confirm against the upstream test file.
    """

    lowerCamelCase__ = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def __a ( self ) -> Dict:
        # Build the shared model tester used by the inherited common tests.
        SCREAMING_SNAKE_CASE : Optional[Any] =FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __a ( self ) -> str:
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __a ( self ) -> Optional[int]:
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __a ( self ) -> Dict:
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __a ( self ) -> Any:
        super().test_hidden_states_output()

    @slow
    def __a ( self ) -> Union[str, Any]:
        # Every model class must load from the public pretrained checkpoint.
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Dict =model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
            self.assertIsNotNone(snake_case_ )

    def __a ( self ) -> str:
        # Attention outputs are only checked when the flag enables them.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __a ( self ) -> Dict:
        # Outputs must be identical (shape-wise) with and without jax.jit.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE : Dict =self._prepare_for_class(snake_case_ , snake_case_ )
                SCREAMING_SNAKE_CASE : Optional[int] =model_class(snake_case_ )

                @jax.jit
                def model_jitted(snake_case_ , snake_case_=None , **snake_case_ ):
                    return model(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ )

                with self.subTest('''JIT Enabled''' ):
                    SCREAMING_SNAKE_CASE : Union[str, Any] =model_jitted(**snake_case_ ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE : Optional[Any] =model_jitted(**snake_case_ ).to_tuple()
                self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
                for jitted_output, output in zip(snake_case_ , snake_case_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def __a ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=1E-5 , snake_case_="outputs" , snake_case_=None ) -> int:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions''' ):
            return
        else:
            super().check_pt_flax_outputs(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
| 258 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): referenced below as `_TO_SKIP` — the constant appears machine-renamed; verify.
UpperCamelCase_ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class snake_case ( unittest.TestCase ):
    """Tests for `TextClassificationPipeline` (PyTorch and TensorFlow backends).

    NOTE(review): machine-renaming artifacts throughout — the class attributes
    rebind one name ``a_`` (originally distinct, e.g. ``model_mapping``), some
    method signatures declare the same parameter name multiple times (a
    SyntaxError), and several bodies read pre-rename locals (``outputs``,
    ``N``). ``idalabel`` is presumably ``id2label``. Confirm upstream.
    """

    a_ : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    a_ : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        a_ : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        a_ : Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def UpperCAmelCase__ ( self) ->Any:
        # Covers top_k and the legacy return_all_scores output shapes on a tiny model.
        a_ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt")
        a_ = text_classifier("This is great !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}])
        a_ = text_classifier("This is great !" , top_k=2)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])
        a_ = text_classifier(["This is great !", "This is bad"] , top_k=2)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        a_ = text_classifier("This is great !" , top_k=1)
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}])
        # Legacy behavior
        a_ = text_classifier("This is great !" , return_all_scores=__UpperCAmelCase)
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}])
        a_ = text_classifier("This is great !" , return_all_scores=__UpperCAmelCase)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])
        a_ = text_classifier(["This is great !", "Something else"] , return_all_scores=__UpperCAmelCase)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        a_ = text_classifier(["This is great !", "Something else"] , return_all_scores=__UpperCAmelCase)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ] , )

    @require_torch
    def UpperCAmelCase__ ( self) ->str:
        # Explicit CPU device placement must still work.
        import torch

        a_ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu") , )
        a_ = text_classifier("This is great !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def UpperCAmelCase__ ( self) ->int:
        # Same tiny model through the TensorFlow framework path.
        a_ = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf")
        a_ = text_classifier("This is great !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def UpperCAmelCase__ ( self) ->List[Any]:
        # Default (full-size) sentiment model, PyTorch.
        a_ = pipeline("text-classification")
        a_ = text_classifier("This is great !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "POSITIVE", "score": 1.0}])
        a_ = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "NEGATIVE", "score": 1.0}])
        a_ = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def UpperCAmelCase__ ( self) ->Tuple:
        # Default (full-size) sentiment model, TensorFlow.
        a_ = pipeline("text-classification" , framework="tf")
        a_ = text_classifier("This is great !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "POSITIVE", "score": 1.0}])
        a_ = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "NEGATIVE", "score": 1.0}])
        a_ = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": "POSITIVE", "score": 0.988}])

    def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
        # Pipeline factory used by the common pipeline test harness.
        a_ = TextClassificationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->str:
        # Shared run-of-the-mill checks: single input, batch, top_k=None, dict
        # (text/text_pair) input, and the deprecated nested-pair formats.
        a_ = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        a_ = "HuggingFace is in"
        a_ = text_classifier(__UpperCAmelCase)
        self.assertEqual(nested_simplify(__UpperCAmelCase) , [{"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}])
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
        a_ = ["HuggingFace is in ", "Paris is in France"]
        a_ = text_classifier(__UpperCAmelCase)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [{"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}, {"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
        self.assertTrue(outputs[1]["label"] in model.config.idalabel.values())
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        a_ = text_classifier(__UpperCAmelCase , top_k=__UpperCAmelCase)
        a_ = len(model.config.idalabel.values())
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [[{"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}] * N, [{"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}] * N] , )
        a_ = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        a_ = text_classifier(__UpperCAmelCase)
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , {"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)} , )
        self.assertTrue(outputs["label"] in model.config.idalabel.values())
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        a_ = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(__UpperCAmelCase):
            text_classifier(__UpperCAmelCase)
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        a_ = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , [{"label": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values())
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# NOTE(review): the seven constants below all rebind one machine-renamed name;
# originally they were distinct (presumably pytestmark, REQUIRE_FAIRSEQ,
# _has_fairseq, UNSUPPORTED_ON_WINDOWS, _on_windows, REQUIRE_TRANSFORMERS,
# _has_transformers — TODO confirm against the upstream file).
UpperCamelCase_ = pytest.mark.integration
UpperCamelCase_ = {'comet'}
UpperCamelCase_ = importlib.util.find_spec('fairseq') is not None
UpperCamelCase_ = {'code_eval'}
UpperCamelCase_ = os.name == 'nt'
UpperCamelCase_ = {'bertscore', 'frugalscore', 'perplexity'}
UpperCamelCase_ = importlib.util.find_spec('transformers') is not None
def UpperCamelCase ( UpperCAmelCase ) ->List[Any]:
    """Decorator: skip a metric test when the metric needs fairseq and it is absent.

    Fixes the obfuscated original, whose inner wrapper shadowed the decorated
    test with its own parameter and then referenced the undefined names
    ``metric_name``/``test_case``: the wrapper's second positional argument is
    the metric name, and the decorated test is the outer ``UpperCAmelCase``.
    """

    @wraps(UpperCAmelCase )
    def wrapper(self , metric_name ):
        # Skip only when fairseq is missing AND this metric actually needs it.
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"" )
        else:
            UpperCAmelCase(self , metric_name )

    return wrapper
def UpperCamelCase ( UpperCAmelCase ) ->Any:
    """Decorator: skip a metric test when the metric needs transformers and it is absent.

    Fixes the obfuscated original, whose inner wrapper referenced the undefined
    names ``metric_name``/``test_case`` (see the fairseq decorator above).
    """

    @wraps(UpperCAmelCase )
    def wrapper(self , metric_name ):
        # Skip only when transformers is missing AND this metric actually needs it.
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"" )
        else:
            UpperCAmelCase(self , metric_name )

    return wrapper
def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]:
    """Decorator: skip a metric test on Windows when the metric is unsupported there.

    Fixes the obfuscated original, whose inner wrapper referenced the undefined
    names ``metric_name``/``test_case`` (see the fairseq decorator above).
    """

    @wraps(UpperCAmelCase )
    def wrapper(self , metric_name ):
        # Skip only on Windows AND only for metrics known not to work there.
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"" )
        else:
            UpperCAmelCase(self , metric_name )

    return wrapper
def UpperCamelCase ( ) ->List[str]:
    """List local metric directories under ./metrics/ as parameterized test cases.

    Fixes the obfuscated original, whose return expression iterated the
    undefined name ``metrics`` instead of the list built on the previous line.
    """
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@local
class snake_case ( parameterized.TestCase ):
    """Doctest-based smoke test for every metric under ./metrics/.

    NOTE(review): machine-renaming artifacts — the three decorator arguments
    ``SCREAMING_SNAKE_CASE_`` are undefined (presumably the three skip
    decorators defined above), the two class attributes rebind ``a_``
    (presumably ``INTENSIVE_CALLS_PATCHER`` and ``metric_name``), and method
    bodies read pre-rename locals (``metric_module``, ``metric``,
    ``parameters``, ``results``, ``patcher``). Confirm upstream before running.
    """

    a_ : List[Any] = {}
    a_ : str = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
        # Import the metric module, sanity-check _compute's signature, then run
        # its doctests with the heavy model calls patched out.
        a_ = "[...]"
        a_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , __UpperCAmelCase)).module_path)
        a_ = datasets.load.import_main_class(metric_module.__name__ , dataset=__UpperCAmelCase)
        # check parameters
        a_ = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(__UpperCAmelCase , metric_module.__name__):
            with self.use_local_metrics():
                try:
                    a_ = doctest.testmod(__UpperCAmelCase , verbose=__UpperCAmelCase , raise_on_error=__UpperCAmelCase)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)

    @slow
    def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int:
        # Slow variant: run the doctests without patching the intensive calls.
        a_ = "[...]"
        a_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , __UpperCAmelCase)).module_path)
        # run doctest
        with self.use_local_metrics():
            a_ = doctest.testmod(__UpperCAmelCase , verbose=__UpperCAmelCase , raise_on_error=__UpperCAmelCase)
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)

    @contextmanager
    def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
        # Apply the registered patcher for this metric, if any; otherwise no-op.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](__UpperCAmelCase):
                yield
        else:
            yield

    @contextmanager
    def UpperCAmelCase__ ( self) ->Dict:
        # Redirect datasets.load_metric to the local ./metrics/ checkouts.
        def load_local_metric(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase):
            return load_metric(os.path.join("metrics" , __UpperCAmelCase) , *__UpperCAmelCase , **__UpperCAmelCase)

        with patch("datasets.load_metric") as mock_load_metric:
            a_ = load_local_metric
            yield

    @classmethod
    def UpperCAmelCase__ ( cls , __UpperCAmelCase) ->int:
        # Class decorator factory: register a context-manager patcher for a metric.
        def wrapper(__UpperCAmelCase):
            a_ = contextmanager(__UpperCAmelCase)
            a_ = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def UpperCamelCase ( UpperCAmelCase ) ->Any:
    """Patch bleurt so its doctest avoids downloading/running a real BLEURT model.

    NOTE(review): machine-renaming artifacts — ``tensorflow.compat.va`` is
    presumably ``tensorflow.compat.v1``; the mock class inherits from the
    undefined ``SCREAMING_SNAKE_CASE_`` (presumably ``Predictor``); its method
    reads the undefined ``input_dict`` (presumably its renamed parameter); and
    ``MockedPredictor`` no longer matches the renamed class. Confirm upstream.
    """
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags

    class snake_case ( SCREAMING_SNAKE_CASE_ ):
        def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        a_ = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def UpperCamelCase ( UpperCAmelCase ) ->List[Any]:
    """Patch bert_score so its doctest avoids downloading/running a real BERT model.

    NOTE(review): the inner def declares the same parameter name four times (a
    SyntaxError), and ``a_ = bert_cos_score_idf`` presumably was
    ``mock_bert_cos_score_idf.side_effect = bert_cos_score_idf`` — confirm.
    """
    import torch

    def bert_cos_score_idf(UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ):
        # Stand-in scorer: one constant (P, R, F) triple per reference.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCAmelCase ) )

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model" ), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        a_ = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]:
    """Patch comet so its doctest avoids downloading/running a real COMET model.

    NOTE(review): machine-renaming artifacts — the mock's predict method reads
    the undefined ``scores`` (presumably the renamed ``a_`` local), and
    ``Model`` no longer matches the renamed inner class; the two ``a_``
    assignments under the patches presumably set the mocks' return values /
    side effects. Confirm upstream.
    """

    def load_from_checkpoint(UpperCAmelCase ):
        class snake_case :
            def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase) ->Union[str, Any]:
                assert len(__UpperCAmelCase) == 2
                a_ = [0.19, 0.92]
                return scores, sum(__UpperCAmelCase) / len(__UpperCAmelCase)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model" ) as mock_download_model:
        a_ = None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            a_ = load_from_checkpoint
            yield
def UpperCamelCase ( ) ->List[Any]:
    """seqeval must reject an unknown tagging scheme with a descriptive ValueError.

    Fixes the obfuscated original, which rebound every local to ``a_`` and then
    referenced the undefined names ``metric``/``wrong_scheme`` and an undefined
    exception type in ``pytest.raises``.
    """
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
def lowercase__( a , b ):
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm).

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) and recursed through the undefined name
    ``greatest_common_divisor``; this restores the computation iteratively so
    it does not depend on resolving its own (shadow-prone) module name.
    """
    # Euclidean step a, b -> b % a, a until a hits 0; gcd(0, b) == |b|.
    while a:
        a, b = b % a, a
    return abs(b )
def lowercase__( x , y ):
    """Return gcd(x, y) by the iterative Euclidean algorithm.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) while its body read ``x``/``y``; the parameters are restored.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x )
def lowercase__( ):
    """Read two comma-separated integers from stdin and print both gcd results.

    NOTE(review): machine-renaming artifacts — the f-strings call the undefined
    names ``greatest_common_divisor``/``gcd_by_iterative`` (both functions above
    were renamed to ``lowercase__``), and the ``__main__`` guard below calls the
    undefined ``main``. Restore the original function names before running.
    """
    try:
        snake_case__ : Optional[Any] = input('Enter two integers separated by comma (,): ' ).split(',' )
        snake_case__ : Tuple = int(nums[0] )
        snake_case__ : Optional[Any] = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_a}) = '''
            f'''{greatest_common_divisor(A , A )}''' )
        print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(A , A )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        # Any malformed input (missing comma, non-integers) falls through here.
        print('Wrong input' )


if __name__ == "__main__":
    main()
| 170 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once for the whole script; module logger below.
# NOTE(review): `lowerCamelCase` appears machine-renamed — the rest of the file
# reads `logger`; confirm against the upstream script.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCamelCase : List[str] = logging.getLogger(__name__)
def lowercase__( out , labels ):
    """Count rows of ``out`` whose argmax equals the matching entry of ``labels``.

    The obfuscated original declared the same parameter name twice (a
    SyntaxError) and referenced the undefined names ``outputs``/``labels``;
    this restores the intended ``accuracy(out, labels)``.
    """
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def lowercase__( A ):
    """Parse a ROCStories CSV into (story, continuation1, continuation2, label) tuples.

    ``A`` is the CSV path. The label column is shifted from 1/2 to 0/1.

    Fixes the obfuscated original, which called ``next()`` on the path string
    and iterated the path instead of the csv reader.
    """
    output = []
    with open(A , encoding='utf_8' ) as f:
        reader = csv.reader(f )
        next(reader )  # skip the first line (header)
        for line in tqdm(reader ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def lowercase__( A , A , A , A , A , A ):
    """Turn encoded (story, cont1, cont2, label) datasets into model input tensors.

    NOTE(review): this block appears machine-mangled beyond repair in place —
    the signature declares one parameter name six times (a SyntaxError), and
    the assignments inside the example loop were presumably slice-assignments
    into the pre-allocated numpy arrays (input_ids / mc_token_ids / lm_labels /
    mc_labels) before renaming flattened them into plain locals. Restore from
    the upstream run_openai_gpt.py before running.
    """
    snake_case__ : int = []
    for dataset in encoded_datasets:
        # Pre-allocate one batch of arrays per dataset: 2 choices per example.
        snake_case__ : str = len(A )
        snake_case__ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        snake_case__ : List[str] = np.zeros((n_batch, 2) , dtype=np.intaa )
        snake_case__ : Dict = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
        snake_case__ : Dict = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(A ):
            # Each choice is: [start] story [delimiter] continuation [classify].
            snake_case__ : Optional[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            snake_case__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            snake_case__ : Optional[int] = with_conta
            snake_case__ : int = with_conta
            snake_case__ : Optional[Any] = len(A ) - 1
            snake_case__ : str = len(A ) - 1
            snake_case__ : Any = with_conta
            snake_case__ : Any = with_conta
            snake_case__ : List[str] = mc_label
        snake_case__ : List[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(A ) for t in all_inputs ) )
    return tensor_datasets
def lowercase__( ):
    """Fine-tune OpenAIGPTDoubleHeadsModel on ROCStories, then optionally evaluate.

    NOTE(review): this block appears machine-renamed — every assignment target
    became ``snake_case__`` while the uses kept the original names (``parser``,
    ``args``, ``device``, ``tokenizer``, ``model``, ...), so it cannot run as
    written. The structure matches the upstream run_openai_gpt.py example;
    restore the original local names before running. Comments below describe
    the intended flow.
    """
    # --- CLI arguments -------------------------------------------------------
    snake_case__ : Any = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=A , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=A , type=A , required=A , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=A , default='' )
    parser.add_argument('--eval_dataset' , type=A , default='' )
    parser.add_argument('--seed' , type=A , default=4_2 )
    parser.add_argument('--num_train_epochs' , type=A , default=3 )
    parser.add_argument('--train_batch_size' , type=A , default=8 )
    parser.add_argument('--eval_batch_size' , type=A , default=1_6 )
    parser.add_argument('--adam_epsilon' , default=1e-8 , type=A , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=A , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=A , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=A , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=A , default=6.25e-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=A , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=A , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=A , default=0.01 )
    parser.add_argument('--lm_coef' , type=A , default=0.9 )
    parser.add_argument('--n_valid' , type=A , default=3_7_4 )
    parser.add_argument('--server_ip' , type=A , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=A , default='' , help='Can be used for distant debugging.' )
    snake_case__ : Optional[int] = parser.parse_args()
    print(A )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A )
        ptvsd.wait_for_attach()
    # --- Reproducibility & device -------------------------------------------
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    snake_case__ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    snake_case__ : int = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(A , A ) )
    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    snake_case__ : Tuple = ['_start_', '_delimiter_', '_classify_']
    snake_case__ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(A )
    snake_case__ : Tuple = tokenizer.convert_tokens_to_ids(A )
    snake_case__ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(A ) )
    model.to(A )

    # Load and encode the datasets
    def tokenize_and_encode(A ):
        # Recursively tokenize strings; pass ints through; map over containers.
        if isinstance(A , A ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A ) )
        elif isinstance(A , A ):
            return obj
        return [tokenize_and_encode(A ) for o in obj]

    logger.info('Encoding dataset...' )
    snake_case__ : str = load_rocstories_dataset(args.train_dataset )
    snake_case__ : List[str] = load_rocstories_dataset(args.eval_dataset )
    snake_case__ : Optional[Any] = (train_dataset, eval_dataset)
    snake_case__ : Any = tokenize_and_encode(A )
    # Compute the max input length for the Transformer
    snake_case__ : Any = model.config.n_positions // 2 - 2
    snake_case__ : List[Any] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    snake_case__ : int = min(A , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    snake_case__ : List[Any] = pre_process_datasets(A , A , A , *A )
    snake_case__ , snake_case__ : Optional[Any] = tensor_datasets[0], tensor_datasets[1]
    snake_case__ : Tuple = TensorDataset(*A )
    snake_case__ : List[str] = RandomSampler(A )
    snake_case__ : int = DataLoader(A , sampler=A , batch_size=args.train_batch_size )
    snake_case__ : str = TensorDataset(*A )
    snake_case__ : Dict = SequentialSampler(A )
    snake_case__ : List[str] = DataLoader(A , sampler=A , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            snake_case__ : Union[str, Any] = args.max_steps
            snake_case__ : Dict = args.max_steps // (len(A ) // args.gradient_accumulation_steps) + 1
        else:
            snake_case__ : int = len(A ) // args.gradient_accumulation_steps * args.num_train_epochs
        # Weight decay applies to everything except biases and LayerNorm params.
        snake_case__ : Tuple = list(model.named_parameters() )
        snake_case__ : List[Any] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        snake_case__ : Dict = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        snake_case__ : int = AdamW(A , lr=args.learning_rate , eps=args.adam_epsilon )
        snake_case__ : Tuple = get_linear_schedule_with_warmup(
            A , num_warmup_steps=args.warmup_steps , num_training_steps=A )
    # --- Training loop -------------------------------------------------------
    if args.do_train:
        snake_case__ , snake_case__ , snake_case__ : Tuple = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            snake_case__ : str = 0
            snake_case__ : Dict = 0
            snake_case__ : Dict = tqdm(A , desc='Training' )
            for step, batch in enumerate(A ):
                snake_case__ : List[str] = tuple(t.to(A ) for t in batch )
                snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = batch
                snake_case__ : int = model(A , mc_token_ids=A , lm_labels=A , mc_labels=A )
                # Total loss = lm_coef * language-modeling loss + multiple-choice loss.
                snake_case__ : Tuple = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                snake_case__ : Union[str, Any] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                snake_case__ : Optional[int] = 'Training loss: {:.2e} lr: {:.2e}'.format(A , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        snake_case__ : List[Any] = model.module if hasattr(A , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        snake_case__ : Union[str, Any] = os.path.join(args.output_dir , A )
        snake_case__ : List[str] = os.path.join(args.output_dir , A )
        torch.save(model_to_save.state_dict() , A )
        model_to_save.config.to_json_file(A )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        snake_case__ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        snake_case__ : Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(A )
    # --- Evaluation loop -----------------------------------------------------
    if args.do_eval:
        model.eval()
        snake_case__ , snake_case__ : int = 0, 0
        snake_case__ , snake_case__ : List[Any] = 0, 0
        for batch in tqdm(A , desc='Evaluating' ):
            snake_case__ : Tuple = tuple(t.to(A ) for t in batch )
            snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = batch
            with torch.no_grad():
                snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = model(
                    A , mc_token_ids=A , lm_labels=A , mc_labels=A )
            snake_case__ : Union[str, Any] = mc_logits.detach().cpu().numpy()
            snake_case__ : List[Any] = mc_labels.to('cpu' ).numpy()
            snake_case__ : Optional[int] = accuracy(A , A )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        snake_case__ : str = eval_loss / nb_eval_steps
        snake_case__ : Any = eval_accuracy / nb_eval_examples
        snake_case__ : Tuple = tr_loss / nb_tr_steps if args.do_train else None
        snake_case__ : Optional[int] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        snake_case__ : Optional[int] = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(A , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info(' %s = %s' , A , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
# Script entry point.
# NOTE(review): `main` is undefined here — the function above was machine-renamed
# to `lowercase__`; restore the original name before running.
if __name__ == "__main__":
    main()
| 170 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_:
    """Builds small ViTMAE configs and random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Name used by the test classes below (the obfuscated class name is kept for
# backward compatibility; this alias fixes the `ViTMAEModelTester(self)` call).
ViTMAEModelTester = UpperCamelCase_
@require_torch
class UpperCamelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                # NOTE(review): the mangled original compared a value with itself
                # (max|x - x| == 0), making this check vacuous; compare before/after.
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test.

    Renamed from the mangled `lowerCamelCase_` to match the `prepare_img()`
    call site in the integration test below.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCamelCase_(unittest.TestCase):
    """Integration test: ViTMAE pretraining forward pass against pinned logits."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 413 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
# Bound as `logger` because the class methods below call `logger.info` / `logger.error`.
logger = logging.get_logger(__name__)
# Maps onnxruntime tensor element-type strings to the corresponding NumPy dtypes.
# The mangled attribute names (np.inta, np.intaa, np.floataa, ...) do not exist
# in NumPy and raised AttributeError at import time.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with save/load helpers.

    Renamed from the mangled `UpperCamelCase_`: the class body already refers to
    itself as `OnnxRuntimeModel` (in `load_model` calls and the log message).
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy arrays; first positional arg of `run`
        # is the list of requested output names (None = all outputs).
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX InferenceSession with the given execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if any) to `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model to a directory so it can be reloaded with `from_pretrained`."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load a model from a local directory or download it from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public entry point; supports the `repo_id@revision` syntax."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 413 | 1 |
import datasets
from .evaluate import evaluate
_SCREAMING_SNAKE_CASE : Union[str, Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_SCREAMING_SNAKE_CASE : Union[str, Any] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_SCREAMING_SNAKE_CASE : Optional[Any] = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """SQuAD v1 metric: exact match and F1 via the official evaluation script.

    `datasets.Metric` dispatches to `_info` and `_compute`; the mangled original
    named both methods `a_`, so the second definition shadowed the first.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        # Map prediction ids to answer texts, and re-wrap the references into the
        # nested article/paragraph/qas layout the official script expects.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 550 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Builds DETR image-processor kwargs and computes expected output sizes.

    Renamed from the mangled `A__` to match the `DetrImageProcessingTester(self)`
    call in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DetrImageProcessor resizing.

        For a batch, each image's expected size is computed individually and the
        per-axis maxima are returned (images get padded up to the batch maximum).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DetrImageProcessor (PIL/numpy/torch inputs and COCO annotations)."""

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 550 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """
    Use Pollard's rho algorithm to return a nontrivial factor of ``num``.
    The returned factor may be composite and require further factorization.

    Returns None if no factor is found within ``attempts`` restarts (always
    the case when ``num`` is prime). Raises ValueError if ``num`` < 2.

    Renamed from the mangled ``_lowercase`` to match the ``pollard_rho`` call
    in the ``__main__`` block below.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use ``(x**2 + C) % num``
    # where ``C`` (= ``step``) can be varied between attempts, since the
    # algorithm's success depends partly on the seed and the chosen function.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Both eventually enter a cycle whose length ``p`` divides ``num``;
            # once there, the position difference shares a divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # This attempt failed: deterministically re-seed (Brent's variant uses
        # the hare's position) and bump the step for the next random function.
        seed = hare
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    # CLI: print one nontrivial factor of the given number (or report "probably
    # prime"). Local variable names restored to match their uses (``parser``,
    # ``args``, ``divisor``, ``quotient``), which the mangled original left undefined.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 701 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a(SchedulerCommonTest):
    """Unit tests for ``DDPMScheduler``.

    Relies on helpers inherited from ``SchedulerCommonTest``
    (``check_over_configs``, ``check_over_forward``, ``dummy_model``,
    ``dummy_sample_deter``).
    """

    # The common test harness iterates over this tuple of scheduler classes.
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default DDPM scheduler config, with keyword overrides applied."""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        # Thresholding disabled, then every (threshold, prediction_type) combination.
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """Spot-check the scheduler's internal variance at three timesteps."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00_979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5

    def test_full_loop_no_noise(self):
        """Run a full reverse-diffusion loop and pin the resulting sample statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9_606) < 1E-2
        assert abs(result_mean.item() - 0.3_372) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same as ``test_full_loop_no_noise`` but with v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0_296) < 1E-2
        assert abs(result_mean.item() - 0.2_631) < 1E-3

    def test_custom_timesteps(self):
        """``previous_timestep`` must walk the custom timestep list, ending at -1."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 51 after 50 is out of descending order and must be rejected.
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # A timestep equal to num_train_timesteps is one past the valid range.
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
| 12 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys (versions, positional-embedding buffers)
    that have no counterpart in the Hugging Face model, mutating ``state_dict``
    in place."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        # pop with a default so keys absent from this checkpoint are tolerated
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` whose weights are tied to the given
    embedding (used as the LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint from ``checkpoint_path`` and return an
    equivalent ``MaMaaaForConditionalGeneration`` model."""
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    # Older checkpoints store hyperparameters under "args", newer under "cfg".
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=10_24, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )

    # The HF model shares one embedding matrix between encoder, decoder and LM head.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the shared/tied weights are filled in below.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 350 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_(unittest.TestCase):
    """Tests for ``WavaVecaProcessorWithLM`` (tokenizer + feature extractor +
    pyctcdecode beam-search decoder bundled into one processor)."""

    def setUp(self):
        # Build a tiny tokenizer vocab and feature-extractor config on disk so the
        # processor can be instantiated from ``self.tmpdirname``.
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_60_00,
            'return_attention_mask': False,
            'do_normalize': True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(feature_extractor_map) + '\n')

        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'

    def get_tokenizer(self, **kwargs_init):
        # Default special-token kwargs, overridable per call.
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['xx'])
        with self.assertRaisesRegex(ValueError, 'include'):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())

    def test_feature_extractor(self):
        # The processor must delegate audio inputs to its feature extractor.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 10_00))

        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(raw_speech, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        # The processor must delegate text inputs to its tokenizer.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = 'This is a test string'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        """Deterministic random logits for decoder tests."""
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual('</s> <s> </s>', decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ['fork'], ['spawn']])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context('fork').Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context('fork').Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9_474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        # Apply the same LM parameters to the bare decoder for comparison.
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )

        with get_context('fork').Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)

    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ['alphabet.json', 'language_model']

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download('hf-internal-testing/processor_with_lm')

        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
        processor_auto = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')

        raw_speech = floats_list((3, 10_00))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors='np')
        input_auto = processor_auto(raw_speech, return_tensors='np')

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names, feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )

    @staticmethod
    def get_from_offsets(offsets, key):
        """Collect ``key`` from every offset dict in ``offsets``."""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('text' in outputs)
        self.assertTrue('word_offsets' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'], 'word')), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'word'), ['<s>', '<s>', '</s>'])
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'start_offset'), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'end_offset'), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('text' in outputs)
        self.assertTrue('word_offsets' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [' '.join(self.get_from_offsets(o, 'word')) for o in outputs['word_offsets']], outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'word'), ['<s>', '<s>', '</s>'])
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'start_offset'), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'end_offset'), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset('common_voice', 'en', split='train', streaming=True)
        ds = ds.cast_column('audio', datasets.Audio(sampling_rate=1_60_00))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
        model = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['audio']['array'], return_tensors='pt').input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]

        EXPECTED_TEXT = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'

        # output words
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps, 'word')), EXPECTED_TEXT)
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps, 'word')), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, 'start_time'))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, 'end_time'))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599])
        expected_end_tensor = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 387 | 0 |
"""simple docstring"""
def _lowerCAmelCase(length: int = 5_0) -> int:
    """Project Euler 117: count the ways to tile a row of ``length`` unit
    squares with black tiles (size 1) and colored tiles of sizes 2, 3 and 4.

    ``ways_number[n]`` starts at 1 (the all-black tiling) and accumulates, for
    every longer tile placed at every start position, the tilings of the
    remainder of the row.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


# The module's own entry point calls ``solution`` — expose it under that name.
solution = _lowerCAmelCase


if __name__ == "__main__":
    print(f'{solution() = }')
| 709 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, a binary search over the already-sorted prefix finds the
    insertion point, then the intervening elements are shifted one slot right.
    Handles empty and single-element lists trivially.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost position after any equal elements
        # (keeps the sort stable).
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open a slot at ``low``.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
| 468 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = tempfile.mkdtemp()
UpperCamelCase :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
UpperCamelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'''do_convert_rgb''': True,
}
UpperCamelCase :Tuple = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase :Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase :Optional[int] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase :str = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
    """from_pretrained must honour extra tokenizer/image-processor kwargs.

    NOTE(review): assignments bind the placeholder ``UpperCamelCase`` while the
    assertions read ``processor`` / ``tokenizer_add_kwargs`` /
    ``image_processor_add_kwargs`` -- the original variable names were lost.
    """
    UpperCamelCase :Optional[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
    processor.save_pretrained(self.tmpdirname )
    # Reference components built directly with the overriding kwargs.
    UpperCamelCase :Tuple = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
    UpperCamelCase :List[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :Optional[Any] = ChineseCLIPProcessor.from_pretrained(
        self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=SCREAMING_SNAKE_CASE_ )
    self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
    self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
    self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
    """Processor(images=...) must match calling the image processor directly.

    NOTE(review): assignments bind the placeholder ``UpperCamelCase`` while the
    loop reads ``input_feat_extract`` / ``input_processor`` -- original names lost.
    """
    UpperCamelCase :List[Any] = self.get_image_processor()
    UpperCamelCase :Tuple = self.get_tokenizer()
    UpperCamelCase :Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :int = self.prepare_image_inputs()
    UpperCamelCase :Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
    UpperCamelCase :List[str] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
    # Per-key pixel sums should agree to within numerical tolerance.
    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
    """Processor(text=...) must produce the same encoding as the raw tokenizer.

    NOTE(review): ``tokenizer`` / ``encoded_tok`` / ``encoded_processor`` are
    read but never bound -- the original variable names were lost.
    """
    UpperCamelCase :int = self.get_image_processor()
    UpperCamelCase :str = self.get_tokenizer()
    UpperCamelCase :Dict = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :Optional[int] = '''Alexandra,T-shirt的价格是15便士。'''
    UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :Any = tokenizer(SCREAMING_SNAKE_CASE_ )
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
    """Joint text+image call returns all expected keys; empty call must raise.

    NOTE(review): assignments bind the placeholder ``UpperCamelCase`` while the
    assertion reads ``inputs`` -- original names lost; the expected exception
    class behind ``SCREAMING_SNAKE_CASE_`` was also lost.
    """
    UpperCamelCase :Dict = self.get_image_processor()
    UpperCamelCase :int = self.get_tokenizer()
    UpperCamelCase :List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :Dict = '''Alexandra,T-shirt的价格是15便士。'''
    UpperCamelCase :Optional[Any] = self.prepare_image_inputs()
    UpperCamelCase :Any = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
    self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
    # test if it raises when no input is passed
    with pytest.raises(SCREAMING_SNAKE_CASE_ ):
        processor()
def UpperCAmelCase ( self ) -> Union[str, Any]:
    """processor.batch_decode must simply delegate to tokenizer.batch_decode.

    NOTE(review): placeholder bindings -- the decoded results compared in the
    final assertion are hidden behind ``SCREAMING_SNAKE_CASE_``.
    """
    UpperCamelCase :Tuple = self.get_image_processor()
    UpperCamelCase :int = self.get_tokenizer()
    UpperCamelCase :List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    UpperCamelCase :Union[str, Any] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
    self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
    """Keys of a joint call must match ``processor.model_input_names``.

    NOTE(review): assignments bind the placeholder ``UpperCamelCase`` while the
    assertion reads ``inputs`` and ``processor`` -- original names lost.
    """
    UpperCamelCase :Dict = self.get_image_processor()
    UpperCamelCase :Tuple = self.get_tokenizer()
    UpperCamelCase :Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
    UpperCamelCase :List[Any] = '''Alexandra,T-shirt的价格是15便士。'''
    UpperCamelCase :List[str] = self.prepare_image_inputs()
    UpperCamelCase :List[str] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
    self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 658 |
from __future__ import annotations
from collections.abc import Callable
def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ):
UpperCamelCase :Optional[Any] = x_start
UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCamelCase :Any = (x_end - x_start) / steps + xa
UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase :Optional[int] = xa
UpperCamelCase :List[str] = fxa
return area
if __name__ == "__main__":
    # Demo: integrate f(x) = x^3 + x^2 on [-5, 5] with increasing step counts.
    # The original block shadowed the integrator by naming this helper ``_A``,
    # and read the undefined names ``f``, ``i`` and ``trapezoidal_area``.

    def f(x):
        """Integrand for the demo: x^3 + x^2."""
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
    while i <= 10_00_00:
        # ``_A`` is the trapezoidal integrator defined above in this module.
        print(f'''with {i} steps: {_A(f, -5, 5, i)}''')
        i *= 10
| 658 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for the tokenizer below.
snake_case__ : str = logging.get_logger(__name__)

# NOTE(review): every constant below rebinds the same placeholder name
# ``snake_case__`` while the tokenizer class reads VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES --
# the original constant names were lost.

# Expected vocabulary files inside a checkpoint directory.
snake_case__ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all BART models at https://huggingface.co/models?filter=bart
# Checkpoint name -> download URL for each vocabulary/merges file.
snake_case__ : Optional[Any] = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
}

# Maximum sequence length (positional-embedding size) per checkpoint.
snake_case__ : str = {
    '''facebook/bart-base''': 1024,
    '''facebook/bart-large''': 1024,
    '''facebook/bart-large-mnli''': 1024,
    '''facebook/bart-large-cnn''': 1024,
    '''facebook/bart-large-xsum''': 1024,
    '''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def _lowerCamelCase ( ):
"""simple docstring"""
UpperCAmelCase_ : Any = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
UpperCAmelCase_ : int = bs[:]
UpperCAmelCase_ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase_ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ : str = [chr(lowerCamelCase_ ) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_ ) )
def _lowerCamelCase ( lowerCamelCase_ : Dict ):
"""simple docstring"""
UpperCAmelCase_ : int = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
    """Byte-level BPE tokenizer (vocab.json + merges.txt), GPT-2/BART style.

    NOTE(review): throughout this class, assignments bind the placeholder name
    ``UpperCAmelCase_`` while later lines read the intended identifiers
    (``self.encoder``, ``self.bpe_ranks``, ``word``, ``pairs``, ``index`` ...),
    and several ``def`` headers repeat the parameter name ``snake_case_``
    (a SyntaxError). The original names were lost and must be restored before
    this class can execute; the comments below describe the evident intent.
    """

    # Standard tokenizer class attributes consumed by the base class.
    lowerCamelCase_ :List[str] = VOCAB_FILES_NAMES
    lowerCamelCase_ :Any = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ :int = ['''input_ids''', '''attention_mask''']

    def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ):
        """Load vocab/merges files, wrap special tokens, build byte<->unicode tables."""
        # Wrap each special token in AddedToken so strip behaviour is explicit.
        UpperCAmelCase_ : Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
        UpperCAmelCase_ : Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
        UpperCAmelCase_ : Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
        UpperCAmelCase_ : Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
        UpperCAmelCase_ : int = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
        UpperCAmelCase_ : Any = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : List[str] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
        super().__init__(
            errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
        # Token -> id mapping from vocab.json, plus its inverse for decoding.
        with open(snake_case_ , encoding='utf-8' ) as vocab_handle:
            UpperCAmelCase_ : Any = json.load(snake_case_ )
        UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase_ : str = errors  # how to handle errors in decoding
        # Byte <-> printable-unicode tables used by byte-level BPE.
        UpperCAmelCase_ : List[str] = bytes_to_unicode()
        UpperCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
        # BPE merge ranks from merges.txt (first line is a version header).
        with open(snake_case_ , encoding='utf-8' ) as merges_handle:
            UpperCAmelCase_ : Optional[int] = merges_handle.read().split('\n' )[1:-1]
        UpperCAmelCase_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCAmelCase_ : List[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        UpperCAmelCase_ : str = {}  # BPE cache: raw token -> merged result
        UpperCAmelCase_ : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCAmelCase_ : Union[str, Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )

    @property
    def _UpperCamelCase ( self ):
        """Vocabulary size: number of entries in the base encoder."""
        return len(self.encoder )

    def _UpperCamelCase ( self ):
        """Return the full vocabulary, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _UpperCamelCase ( self , snake_case_ ):
        """Apply byte-pair encoding to one byte-mapped token, with memoisation."""
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase_ : Union[str, Any] = tuple(snake_case_ )
        UpperCAmelCase_ : str = get_pairs(snake_case_ )
        if not pairs:
            return token
        # Repeatedly merge the lowest-ranked adjacent pair until none remain.
        while True:
            UpperCAmelCase_ : str = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
            UpperCAmelCase_ : Any = []
            UpperCAmelCase_ : Union[str, Any] = 0
            # Rebuild the word, fusing every occurrence of (first, second).
            while i < len(snake_case_ ):
                try:
                    UpperCAmelCase_ : Dict = word.index(snake_case_ , snake_case_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase_ : Tuple = j
                if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase_ : Optional[int] = tuple(snake_case_ )
            UpperCAmelCase_ : Optional[Any] = new_word
            if len(snake_case_ ) == 1:
                break
            else:
                UpperCAmelCase_ : Dict = get_pairs(snake_case_ )
        UpperCAmelCase_ : Tuple = ' '.join(snake_case_ )
        UpperCAmelCase_ : Dict = word  # memoise the merged result
        return word

    def _UpperCamelCase ( self , snake_case_ ):
        """Split text with the GPT-2 regex, map bytes to unicode, then BPE each piece."""
        UpperCAmelCase_ : List[str] = []
        for token in re.findall(self.pat , snake_case_ ):
            UpperCAmelCase_ : Optional[int] = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(' ' ) )
        return bpe_tokens

    def _UpperCamelCase ( self , snake_case_ ):
        """Token string -> vocabulary id (falls back to the unk id)."""
        return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )

    def _UpperCamelCase ( self , snake_case_ ):
        """Vocabulary id -> token string."""
        return self.decoder.get(snake_case_ )

    def _UpperCamelCase ( self , snake_case_ ):
        """Join tokens and undo the byte->unicode mapping back into text."""
        UpperCAmelCase_ : int = ''.join(snake_case_ )
        UpperCAmelCase_ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text

    def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
        """Write vocab.json and merges.txt into a directory; return their paths."""
        if not os.path.isdir(snake_case_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCAmelCase_ : str = os.path.join(
            snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        UpperCAmelCase_ : Optional[Any] = os.path.join(
            snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(snake_case_ , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '\n' )
        UpperCAmelCase_ : List[Any] = 0
        with open(snake_case_ , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            # Merges must be written in rank order; warn when ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    UpperCAmelCase_ : Tuple = token_index
                writer.write(' '.join(snake_case_ ) + '\n' )
                index += 1
        return vocab_file, merge_file

    def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
        """Add special tokens: <s> A </s>, or <s> A </s></s> B </s> for pairs."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ : Dict = [self.cls_token_id]
        UpperCAmelCase_ : Dict = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
        """Mask marking special-token positions with 1 and sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
        if token_ids_a is None:
            return [1] + ([0] * len(snake_case_ )) + [1]
        return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]

    def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
        """Token-type ids; this model does not use them, so everything is 0."""
        UpperCAmelCase_ : str = [self.sep_token_id]
        UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _UpperCamelCase ( self , snake_case_ , snake_case_=False , **snake_case_ ):
        """Optionally prepend a space so the first word BPE-merges like mid-text words."""
        UpperCAmelCase_ : Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
            UpperCAmelCase_ : Dict = ' ' + text
        return (text, kwargs)
| 389 | '''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# This conversion script relies on fairseq APIs introduced in 1.0.0a.
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
# NOTE(review): both assignments below rebind the placeholder ``snake_case__``;
# the intended names (a module logger and the sample sentence used for the
# output-equivalence check) were lost.
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Dict = '''Hello world! cécé herlolip'''
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : bool ):
    """Convert a fairseq XLM-RoBERTa-XL checkpoint into a HuggingFace checkpoint.

    Loads the fairseq model, builds a matching HF config, copies embeddings and
    every encoder layer plus the LM / classification head, verifies the two
    models produce (near-)identical outputs on a sample sentence, then saves
    the HF model.

    NOTE(review): assignments bind the placeholder ``UpperCAmelCase_`` where the
    original wrote into concrete targets (e.g. ``model.roberta.embeddings...``
    weights); the original assignment targets and the three parameter names
    (the first two ``lowerCamelCase_`` duplicate the third -- a SyntaxError)
    were lost and must be restored before this function can run.
    """
    UpperCAmelCase_ : Optional[int] = FairseqRobertaModel.from_pretrained(lowerCamelCase_ )
    roberta.eval()  # disable dropout
    UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.sentence_encoder
    # Build an HF config mirroring the fairseq architecture hyper-parameters.
    UpperCAmelCase_ : str = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        UpperCAmelCase_ : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , lowerCamelCase_ )
    UpperCAmelCase_ : str = XLMRobertaXLForSequenceClassification(lowerCamelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase_ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    UpperCAmelCase_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
    UpperCAmelCase_ : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
    UpperCAmelCase_ : Tuple = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    UpperCAmelCase_ : int = roberta_sent_encoder.layer_norm.weight
    UpperCAmelCase_ : Optional[Any] = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        UpperCAmelCase_ : BertLayer = model.roberta.encoder.layer[i]
        UpperCAmelCase_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        UpperCAmelCase_ : RobertaAttention = layer.attention
        UpperCAmelCase_ : str = roberta_layer.self_attn_layer_norm.weight
        UpperCAmelCase_ : Tuple = roberta_layer.self_attn_layer_norm.bias
        # self attention
        UpperCAmelCase_ : BertSelfAttention = layer.attention.self
        # q/k/v projections must all be square (hidden_size x hidden_size).
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.q_proj.weight
        UpperCAmelCase_ : str = roberta_layer.self_attn.q_proj.bias
        UpperCAmelCase_ : List[str] = roberta_layer.self_attn.k_proj.weight
        UpperCAmelCase_ : Any = roberta_layer.self_attn.k_proj.bias
        UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.v_proj.weight
        UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        UpperCAmelCase_ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        UpperCAmelCase_ : Tuple = roberta_layer.self_attn.out_proj.weight
        UpperCAmelCase_ : str = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        UpperCAmelCase_ : Any = roberta_layer.final_layer_norm.weight
        UpperCAmelCase_ : int = roberta_layer.final_layer_norm.bias
        # intermediate
        UpperCAmelCase_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        UpperCAmelCase_ : Any = roberta_layer.fca.weight
        UpperCAmelCase_ : Optional[int] = roberta_layer.fca.bias
        # output
        UpperCAmelCase_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        UpperCAmelCase_ : Any = roberta_layer.fca.weight
        UpperCAmelCase_ : Optional[int] = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].dense.weight
        UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].dense.bias
        UpperCAmelCase_ : List[str] = roberta.model.classification_heads['mnli'].out_proj.weight
        UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
        UpperCAmelCase_ : Optional[int] = roberta.model.encoder.lm_head.dense.bias
        UpperCAmelCase_ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight
        UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias
        UpperCAmelCase_ : Dict = roberta.model.encoder.lm_head.weight
        UpperCAmelCase_ : List[Any] = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    UpperCAmelCase_ : torch.Tensor = roberta.encode(lowerCamelCase_ ).unsqueeze(0 )  # batch of size 1
    UpperCAmelCase_ : Any = model(lowerCamelCase_ )[0]
    if classification_head:
        UpperCAmelCase_ : Tuple = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCamelCase_ ) )
    else:
        UpperCAmelCase_ : Any = roberta.model(lowerCamelCase_ )[0]
    print(our_output.shape , their_output.shape )
    UpperCAmelCase_ : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    UpperCAmelCase_ : List[Any] = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    pathlib.Path(lowerCamelCase_ ).mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    # The original block bound the parser and the parsed args to the throwaway
    # name ``snake_case__`` (so ``parser``/``args`` were undefined) and called
    # ``convert_xlm_roberta_xl_checkpoint_to_pytorch``, which does not exist in
    # this module -- the conversion routine here is (mis)named ``_lowerCamelCase``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    args = parser.parse_args()
    _lowerCamelCase(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 389 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# NOTE(review): bound to the placeholder ``_lowerCamelCase``, but the class
# below logs through ``logger`` -- the original name was lost.
_lowerCamelCase = logging.getLogger(__name__)

# torch_xla is only importable on TPU hosts; guard the import accordingly.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class _snake_case (__SCREAMING_SNAKE_CASE):
    """Question-answering Trainer with pytorch-quantization (quant_trainer) support.

    Adds INT8 calibration, evaluate/predict with SQuAD-style post-processing,
    and ONNX export on top of the base Trainer.

    NOTE(review): many assignments bind the placeholder ``UpperCAmelCase_``
    while later lines read the intended names (``calib_dataloader``, ``model``,
    ``output``, ``metrics`` ...), and most ``def`` headers repeat the parameter
    name ``_snake_case`` (a SyntaxError). The original identifiers were lost
    and must be restored before this class can execute.
    """

    def __init__( self ,*_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,**_snake_case ):
        """Store eval examples, the post-processing hook, and quant-trainer args."""
        super().__init__(*_snake_case ,**_snake_case )
        UpperCAmelCase_ : int = eval_examples
        UpperCAmelCase_ : Optional[int] = post_process_function
        UpperCAmelCase_ : int = quant_trainer_args
        UpperCAmelCase_ : Any = 1_28  # default number of calibration samples

    def UpperCamelCase__ ( self ,_snake_case=None ):
        """Build a DataLoader over the calibration dataset (unused columns removed)."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset." )
        UpperCAmelCase_ : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
        UpperCAmelCase_ : Dict = self._remove_unused_columns(_snake_case ,description="Calibration" )
        return DataLoader(
            _snake_case ,batch_size=self.args.eval_batch_size ,collate_fn=self.data_collator ,drop_last=self.args.dataloader_drop_last ,num_workers=self.args.dataloader_num_workers ,pin_memory=self.args.dataloader_pin_memory ,shuffle=_snake_case ,)

    def UpperCamelCase__ ( self ,_snake_case=None ):
        """Run quantization calibration over up to ``calib_num`` samples."""
        UpperCAmelCase_ : Any = self.train_dataset if calib_dataset is None else calib_dataset
        UpperCAmelCase_ : Any = self.get_calib_dataloader(_snake_case )
        UpperCAmelCase_ : int = self.model
        quant_trainer.configure_model(_snake_case ,self.quant_trainer_args ,calib=_snake_case )
        model.eval()
        quant_trainer.enable_calibration(_snake_case )
        logger.info("***** Running calibration *****" )
        logger.info(f''' Num examples = {self.calib_num}''' )
        logger.info(f''' Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(_snake_case ):
            # Prediction step
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.prediction_step(_snake_case ,_snake_case ,prediction_loss_only=_snake_case )
            # Stop once enough samples have been observed.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(_snake_case ,self.quant_trainer_args )
        UpperCAmelCase_ : Dict = model

    def UpperCamelCase__ ( self ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = "eval" ):
        """Evaluate with optional SQuAD-style post-processing; return metrics."""
        UpperCAmelCase_ : str = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCAmelCase_ : Any = self.get_eval_dataloader(_snake_case )
        UpperCAmelCase_ : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase_ : List[Any] = self.compute_metrics
        UpperCAmelCase_ : Dict = None
        UpperCAmelCase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCAmelCase_ : List[str] = eval_loop(
                _snake_case ,description="Evaluation" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_snake_case ,)
        finally:
            # Always restore the metric hook, even if the eval loop raised.
            UpperCAmelCase_ : Tuple = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            UpperCAmelCase_ : List[str] = self.post_process_function(_snake_case ,_snake_case ,output.predictions )
            UpperCAmelCase_ : int = self.compute_metrics(_snake_case )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    UpperCAmelCase_ : Tuple = metrics.pop(_snake_case )
            self.log(_snake_case )
        else:
            UpperCAmelCase_ : Tuple = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        UpperCAmelCase_ : List[Any] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_snake_case )
        return metrics

    def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case = "test" ):
        """Predict on a test set, post-process, and return a PredictionOutput."""
        UpperCAmelCase_ : int = self.get_test_dataloader(_snake_case )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase_ : Any = self.compute_metrics
        UpperCAmelCase_ : Tuple = None
        UpperCAmelCase_ : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCAmelCase_ : Dict = eval_loop(
                _snake_case ,description="Prediction" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_snake_case ,)
        finally:
            UpperCAmelCase_ : Optional[Any] = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        UpperCAmelCase_ : Union[str, Any] = self.post_process_function(_snake_case ,_snake_case ,output.predictions ,"predict" )
        UpperCAmelCase_ : str = self.compute_metrics(_snake_case )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                UpperCAmelCase_ : Optional[int] = metrics.pop(_snake_case )
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_snake_case )

    def UpperCamelCase__ ( self ,_snake_case="./" ):
        """Export the (quantized) model to ONNX, using one eval batch as example input."""
        UpperCAmelCase_ : int = self.eval_dataset
        UpperCAmelCase_ : Union[str, Any] = self.get_eval_dataloader(_snake_case )
        UpperCAmelCase_ : str = next(iter(_snake_case ) )
        # saving device - to make it consistent
        UpperCAmelCase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
        # convert to tuple
        UpperCAmelCase_ : Any = tuple(v.to(_snake_case ) for k, v in batch.items() )
        logger.info("Converting model to be onnx compatible" )
        from pytorch_quantization.nn import TensorQuantizer

        UpperCAmelCase_ : Optional[int] = True
        UpperCAmelCase_ : int = self.model.to(_snake_case )
        model.eval()
        model.float()
        UpperCAmelCase_ : Optional[Any] = model.module if hasattr(_snake_case ,"module" ) else model
        quant_trainer.configure_model(_snake_case ,self.quant_trainer_args )
        UpperCAmelCase_ : List[str] = os.path.join(_snake_case ,"model.onnx" )
        logger.info(f'''exporting model to {output_model_file}''' )
        # Batch and sequence dimensions are dynamic in the exported graph.
        UpperCAmelCase_ : List[Any] = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            _snake_case ,_snake_case ,_snake_case ,export_params=_snake_case ,opset_version=13 ,do_constant_folding=_snake_case ,input_names=["input_ids", "attention_mask", "token_type_ids"] ,output_names=["output_start_logits", "output_end_logits"] ,dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            } ,verbose=_snake_case ,)
        logger.info("onnx export finished" )
| 71 |
'''simple docstring'''
from statistics import mean, stdev
def a__(data: list, ndigits: int = 3) -> list:
    """Min-max normalise *data* into [0, 1], rounding to *ndigits* decimals.

    The original declared both parameters under the same name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) and read ``x_min``/``x_max``
    without binding them; this restores the intended implementation with the
    same positional interface.

    :param data: non-empty list of numbers
    :param ndigits: decimal places for rounding (default 3)
    :return: list where min(data) maps to 0.0 and max(data) maps to 1.0
    :raises ZeroDivisionError: if all values in *data* are equal
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def a__(data: list, ndigits: int = 3) -> list:
    """Standardise *data* to zero mean and unit (sample) standard deviation.

    The original declared both parameters under the same name
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) and read ``mu``/``sigma``
    without binding them; this restores the intended implementation with the
    same positional interface. Note: this definition shadows the min-max
    normaliser above, which shares the (mangled) name ``a__``.

    :param data: list of at least two numbers (stdev needs two data points)
    :param ndigits: decimal places for rounding (default 3)
    :return: z-scores of *data*, rounded
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
| 71 | 1 |
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
__lowercase = len(bin(A__ )[3:] )
__lowercase = bin(abs(A__ ) - (1 << binary_number_length) )[3:]
__lowercase = (
(
'''1'''
+ '''0''' * (binary_number_length - len(A__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 624 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for the tokenizer below.
lowerCAmelCase__ = logging.get_logger(__name__)

# NOTE(review): every constant below rebinds the same placeholder name
# ``lowerCAmelCase__`` while the class reads VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES --
# the original constant names were lost.

# SentencePiece model file expected inside a checkpoint directory.
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model'''}

# Checkpoint name -> download URL for the vocabulary file.
lowerCAmelCase__ = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}

# Maximum sequence length (positional-embedding size) per checkpoint.
lowerCAmelCase__ = {
    '''google/rembert''': 256,
}
class lowercase_ (lowerCamelCase__ ):
    """SentencePiece-based tokenizer (RemBERT-style vocabulary).

    Wraps a ``sentencepiece`` model and implements the usual tokenizer hooks:
    piece <-> id conversion, special-token bookkeeping for single and paired
    sequences, and vocabulary serialization.

    NOTE(review): the three class attributes all rebind the single name
    ``SCREAMING_SNAKE_CASE`` and most methods share that name too, so later
    definitions shadow earlier ones; ``__init__`` declares every parameter as
    ``lowercase__``, which is a SyntaxError (duplicate argument names), and the
    method bodies assign to a throwaway ``__lowercase`` instead of the
    attributes/locals they later read. This looks like automated-rename
    damage; the code is left byte-identical and flagged for a coordinated
    rename rather than a per-method fix.
    """
    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : str ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=False ,lowercase__ : Dict=True ,lowercase__ : List[str]=True ,lowercase__ : Dict="[CLS]" ,lowercase__ : Union[str, Any]="[SEP]" ,lowercase__ : List[str]="[UNK]" ,lowercase__ : int="[SEP]" ,lowercase__ : List[str]="[PAD]" ,lowercase__ : Optional[int]="[CLS]" ,lowercase__ : List[Any]="[MASK]" ,**lowercase__ : int ,):
        # Forward all tokenizer options to the PreTrainedTokenizer base class.
        super().__init__(
            do_lower_case=lowercase__ ,remove_space=lowercase__ ,keep_accents=lowercase__ ,bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,sep_token=lowercase__ ,pad_token=lowercase__ ,cls_token=lowercase__ ,mask_token=lowercase__ ,**lowercase__ ,)
        __lowercase = do_lower_case
        __lowercase = remove_space
        __lowercase = keep_accents
        __lowercase = vocab_file
        # Load the SentencePiece model from the supplied vocab file.
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(lowercase__ )
    @property
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Vocabulary size = number of pieces in the SentencePiece model.
        return len(self.sp_model )
    def SCREAMING_SNAKE_CASE ( self : str ):
        # Full token -> id map, including tokens added after training.
        __lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : List[Any] ):
        # Intended pattern: drop the (unpicklable) SentencePiece processor on
        # pickle and reload it in __setstate__ from self.vocab_file.
        __lowercase = self.__dict__.copy()
        __lowercase = None
        return state
    def __setstate__( self : str ,lowercase__ : Optional[int] ):
        __lowercase = d
        __lowercase = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ,lowercase__ : List[Any]=False ):
        # Tokenize raw text into SentencePiece pieces.
        __lowercase = self.sp_model.EncodeAsPieces(lowercase__ )
        return pieces
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ):
        # piece (token) -> integer id
        return self.sp_model.PieceToId(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
        # integer id -> piece (token)
        return self.sp_model.IdToPiece(lowercase__ )
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Tuple ):
        # Join pieces back into a plain string.
        __lowercase = self.sp_model.decode_pieces(lowercase__ )
        return out_string
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
        # Special-tokens mask: 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
        return [1] + ([0] * len(lowercase__ )) + [1]
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
        # Persist the vocabulary: copy the SentencePiece model into the directory.
        if not os.path.isdir(lowercase__ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase__ ) )
            return
        __lowercase = os.path.join(
            lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file ,lowercase__ )
        return (out_vocab_file,)
| 624 | 1 |
'''simple docstring'''
def a_(__snake_case: int = 100_0000) -> int:
    """Count reduced proper fractions with denominator <= ``__snake_case``.

    This equals ``sum(phi(n) for n in 2..limit)`` where ``phi`` is Euler's
    totient function (Project Euler problem 72).

    Args:
        __snake_case: upper bound on the denominator (inclusive).

    Returns:
        The number of reduced proper fractions as an int.
    """
    # Odd-only sieve of Eratosthenes, then add 2 back in.
    primes = set(range(3, __snake_case, 2))
    primes.add(2)
    for p in range(3, __snake_case, 2):
        if p not in primes:
            continue
        # Strike out odd multiples of p starting at p*p (step p, not the limit
        # as the previous, broken version did).
        primes.difference_update(set(range(p * p, __snake_case, p)))

    # phi[n] starts at n and is multiplied by (1 - 1/p) for every prime p | n.
    phi = [float(n) for n in range(__snake_case + 1)]
    for p in primes:
        for n in range(p, __snake_case + 1, p):
            phi[n] *= 1 - 1 / p

    # phi[0] and phi[1] are excluded: only proper fractions count.
    return int(sum(phi[2:]))
if __name__ == "__main__":
    # The solver in this module is named `a_`; the old name `solution`
    # no longer exists here and raised NameError.
    print(f"""{a_() = }""")
| 676 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowercase__ = HfApi()
lowercase__ = {}
# fmt: off
lowercase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
# NOTE(review): this whole script's variables were collapsed into the single
# name ``lowercase__`` by an automated rename: ``api``, ``models``,
# ``local_checkpoint``, ``model``, ``noise``, ``time_step``, ``logits`` and the
# ``results`` golden-value table (the tensors above) are all unbound, so the
# loop below raises NameError as written. Left byte-identical; the fix needs a
# coordinated rename across the module, including the constants block.
lowercase__ = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Local path convention for downloaded checkpoints.
        lowercase__ = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith('''CompVis'''):
            lowercase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            lowercase__ = UNetaDModel.from_pretrained(local_checkpoint)
        # Deterministic noise so outputs can be compared against the goldens.
        torch.manual_seed(0)
        random.seed(0)
        lowercase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        lowercase__ = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            lowercase__ = model(noise, time_step).sample
        # Compare the first 30 logits against the per-model golden tensor.
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 508 | 0 |
"""simple docstring"""
from __future__ import annotations
def __snake_case(possible_board, diagonal_right_collisions, diagonal_left_collisions, boards, n) -> None:
    """Depth-first search that places one queen per row of an n x n board.

    Args:
        possible_board: partial solution; index = row, value = column of queen.
        diagonal_right_collisions: occupied 45-degree diagonals (row - col).
        diagonal_left_collisions: occupied 135-degree diagonals (row + col).
        boards: output accumulator; complete boards are appended as lists of
            row strings like ``'. Q . . '``.
        n: board size.

    NOTE(review): the previous version recursed via the module-level name
    ``depth_first_search``, which does not exist in this file; recursion now
    goes through an inner helper so the function is self-contained.
    """

    def _dfs(board, right_diags, left_diags):
        # The row to fill next equals the number of queens already placed.
        row = len(board)
        if row == n:
            # Render [1, 3, 0, 2] as ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
            boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in board])
            return
        for col in range(n):
            # Vertical clash (col already used) or diagonal clash:
            #   45 degrees:  row - col constant;  135 degrees: row + col constant.
            if (
                col in board
                or row - col in right_diags
                or row + col in left_diags
            ):
                continue
            _dfs([*board, col], [*right_diags, row - col], [*left_diags, row + col])

    _dfs(possible_board, diagonal_right_collisions, diagonal_left_collisions)
def __snake_case(UpperCamelCase__) -> None:
    """Solve the n-queens problem for an ``UpperCamelCase__`` x ``UpperCamelCase__``
    board and print every solution, followed by the solution count.

    NOTE(review): the previous version delegated to a module-level
    ``depth_first_search`` that no longer exists under that name; the search is
    now embedded so this function stands alone.
    """
    boards = []

    def _dfs(board, right_diags, left_diags):
        row = len(board)
        if row == UpperCamelCase__:
            boards.append(
                [". " * i + "Q " + ". " * (UpperCamelCase__ - 1 - i) for i in board]
            )
            return
        for col in range(UpperCamelCase__):
            if col in board or row - col in right_diags or row + col in left_diags:
                continue
            _dfs([*board, col], [*right_diags, row - col], [*left_diags, row + col])

    _dfs([], [], [])

    # Print all the boards, one row string per line, blank line between boards.
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo for a 4x4 board. The solver in this module is the one-argument
    # `__snake_case`; the old name `n_queens_solution` no longer exists here.
    __snake_case(4)
| 91 |
"""simple docstring"""
from __future__ import annotations
def __snake_case(UpperCamelCase__) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of the list.

    Classic "house robber" dynamic program: track the best total that includes
    the current element and the best total that excludes it.

    Args:
        UpperCamelCase__: list of ints (may be empty; values may be negative).

    Returns:
        The maximum non-adjacent sum; 0 for an empty list (and never negative,
        since taking no elements is always allowed).
    """
    if not UpperCamelCase__:
        return 0
    # max_including: best sum that uses the current element.
    # max_excluding: best sum that skips it.
    max_including = UpperCamelCase__[0]
    max_excluding = 0
    for num in UpperCamelCase__[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 91 | 1 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
# Repository root; the script is meant to be run from the repo root as
# `python utils/check_doctest_list.py`.
REPO_PATH = "."

if __name__ == "__main__":
    # Every line of documentation_tests.txt must name an existing file or
    # directory, and the file must be kept in alphabetical order.
    # (The previous version bound every variable to one collapsed name,
    # so REPO_PATH / the path lists were all undefined.)
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(
            f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}"
        )
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 135 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Maps each submodule to the public names it exports, so `_LazyModule` can
# defer the actual imports until first attribute access.
# (The previous version bound this dict — and the lazy-module instance below —
# to throwaway names, so the lazy-import machinery was never wired up.)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration symbols are exposed.
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 135 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__lowerCamelCase = None
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowerCamelCase = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
__lowerCamelCase = {
"google/fnet-base": 5_12,
"google/fnet-large": 5_12,
}
__lowerCamelCase = "▁"
class UpperCamelCase__( __A ):
    """Fast (Rust ``tokenizers``-backed) FNet tokenizer.

    NOTE(review): the base-class name ``__A`` is not defined in this module
    (the imports above provide ``PreTrainedTokenizerFast``, presumably the
    intended base); ``__init__`` declares every parameter as
    ``__UpperCAmelCase`` (duplicate argument names — a SyntaxError); and all
    three post-init methods share the name ``snake_case__``, so later defs
    shadow earlier ones. Automated-rename damage — left byte-identical and
    flagged for a coordinated rename.
    """
    lowerCAmelCase__ : Tuple = VOCAB_FILES_NAMES
    lowerCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ : Union[str, Any] = ['input_ids', 'token_type_ids']
    lowerCAmelCase__ : str = FNetTokenizer
    def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,**__UpperCAmelCase ,) -> Tuple:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        A__ = (
            AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ,normalized=__UpperCAmelCase )
            if isinstance(__UpperCAmelCase ,__UpperCAmelCase )
            else mask_token
        )
        super().__init__(
            __UpperCAmelCase ,tokenizer_file=__UpperCAmelCase ,do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,**__UpperCAmelCase ,)
        A__ = do_lower_case
        A__ = remove_space
        A__ = keep_accents
        A__ = vocab_file
        # Slow->fast conversion is only possible when the sentencepiece file exists.
        A__ = False if not self.vocab_file else True
    def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        # Persist the vocabulary: copy the sentencepiece model into the directory.
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ = os.path.join(
            __UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.vocab_file ,__UpperCAmelCase )
        return (out_vocab_file,)
| 536 | """simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase__(Protocol):
    """Structural type for an audio filter: anything that can process one
    float sample at a time satisfies this interface.

    The previous version inherited from ``__A``, a name that does not exist in
    this module; ``Protocol`` is imported above precisely for this interface.
    """

    def snake_case__(self, __UpperCAmelCase) -> float:
        # Default no-op implementation; concrete filters override this.
        return 0.0
def UpperCAmelCase(fft_results, samplerate):
    """Return ``(lowest, highest)`` plotting bounds for a frequency response.

    Only bins ``1 .. samplerate // 2 - 2`` are considered (DC excluded, below
    Nyquist); the bounds are widened to at least ``[-20, 20]``.

    Args:
        fft_results: array of per-bin magnitudes (e.g. in dB).
        samplerate: sample rate used to locate the Nyquist bin.
    """
    band = fft_results[1 : samplerate // 2 - 1]
    lowest = min([-20, np.min(band)])
    highest = max([20, np.max(band)])
    return lowest, highest
def UpperCAmelCase(filter_type, samplerate):
    """Plot the gain (dB) frequency response of ``filter_type``.

    Runs a unit impulse through the filter, zero-pads the impulse response to
    ``samplerate`` samples, and plots ``20 * log10(|FFT|)`` on a logarithmic
    frequency axis from 24 Hz up to just below Nyquist.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds: clamp to at least [-20, 20] over the
    # sub-Nyquist band, then cap the plot at [-80, 80] dB.
    lowest = min([-20, np.min(fft_db[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_db[1 : samplerate // 2 - 1])])
    plt.ylim(max([-80, lowest]), min([80, highest]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def UpperCAmelCase(filter_type, samplerate):
    """Plot the phase response (radians) of ``filter_type``.

    Runs a unit impulse through the filter, zero-pads the impulse response to
    ``samplerate`` samples, and plots the unwrapped FFT phase on a logarithmic
    frequency axis from 24 Hz up to just below Nyquist.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    # Unwrap with a -2*pi period so jumps across the branch cut are smoothed.
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 536 | 1 |
'''simple docstring'''
def UpperCAmelCase_(lowerCAmelCase_=1000):
    """Return the sum of the decimal digits of ``2 ** lowerCAmelCase_``.

    Project Euler problem 16 (default: digits of 2**1000).

    Args:
        lowerCAmelCase_: non-negative exponent.
    """
    n = 2 ** lowerCAmelCase_
    r = 0
    # Peel off the last digit each iteration; the previous version assigned the
    # whole tuple to one throwaway name, so n never shrank (infinite loop).
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    # The solver in this module is named `UpperCAmelCase_`; the old name
    # `solution` no longer exists here and raised NameError.
    print(UpperCAmelCase_(int(str(input()).strip())))
| 310 | '''simple docstring'''
def UpperCamelCase_(_UpperCAmelCase: int) -> list:
    """Return the first ``_UpperCAmelCase`` Hamming numbers.

    Hamming numbers have the form ``2**i * 3**j * 5**k``; the series starts
    1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    Args:
        _UpperCAmelCase: how many terms to generate (must be >= 1).

    Raises:
        ValueError: if fewer than one element is requested.
    """
    n_element = int(_UpperCAmelCase)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error
    # i, j, k track the smallest elements whose multiple by 2, 3, 5
    # respectively still exceeds the current maximum.
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # The next Hamming number is the smallest candidate multiple.
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive demo. The generator in this module is named
    # `UpperCamelCase_`; the old name `hamming` no longer exists here, and the
    # input was previously bound to a throwaway name while `n` stayed unbound.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = UpperCamelCase_(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
from __future__ import annotations
# NOTE(review): every constant in this block rebinds the single name ``a__``
# (type aliases, alphabet, rotor wirings, reflector, extra rotors), so only
# the final assignment survives and the names the functions below rely on
# (``abc``, ``reflector``, the rotor strings) are never defined. Left
# byte-identical; restoring the original constant names must be done as one
# coordinated module-wide change.
a__ : str = tuple[int, int, int]
a__ : int = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
a__ : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
a__ : List[str] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
a__ : List[str] = "FOBHMDKEXQNRAULPGSJVTYICZW"
a__ : Optional[int] = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric letter-pair mapping: each letter maps to its partner and back.
a__ : int = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}
# -------------------------- extra rotors --------------------------
a__ : Any = "RMDJXFUWGISLHVTCQNKYPBEZOA"
a__ : Tuple = "SGLCPQWZHKXAREONTFBVIYJUDM"
a__ : List[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN"
a__ : int = "RZWQHFMVDBKICJLNTUXAGYPSOE"
a__ : int = "LFKIJODBEGAMQPXVUHYSTCZRWN"
a__ : str = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _lowerCAmelCase(rotpos, rotsel, pbstring):
    """Validate rotor selection/positions and parse the plugboard string.

    Args:
        rotpos: three 1-based starting positions, one per rotor.
        rotsel: three rotor wiring strings (must be mutually distinct).
        pbstring: plugboard specification (pairs of letters).

    Returns:
        ``(rotpos, rotsel, pbdict)`` with the plugboard parsed into a dict.

    NOTE(review): relies on the module-level alphabet ``abc`` and the helper
    ``_plugboard``, neither of which is currently bound under those names in
    this file (constants were all renamed to ``a__`` and all helpers to
    ``_lowerCAmelCase``); a coordinated module-level rename is still required.
    The previous version also declared all three parameters as ``A__`` — a
    SyntaxError — and collapsed the three positions into one variable.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid (1-based, within the alphabet).
    rotorposa, rotorposb, rotorposc = rotpos
    if not 0 < rotorposa <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorposa})"
        raise ValueError(msg)
    if not 0 < rotorposb <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorposb})"
        raise ValueError(msg)
    if not 0 < rotorposc <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorposc})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pbstring)
    return rotpos, rotsel, pbdict
def _lowerCAmelCase(A__):
    """Parse a plugboard specification into a symmetric letter-swap dict.

    Args:
        A__: string of letter pairs, e.g. ``"AB"`` -> ``{"A": "B", "B": "A"}``.
            The empty string means no plugboard.

    Raises:
        TypeError: if the input is not a string.
        Exception: for an odd-length string, a symbol outside the alphabet,
            or a duplicated symbol.

    NOTE(review): reads the module-level alphabet ``abc``, which is currently
    shadowed by the ``a__`` renames above. The previous version referenced an
    undefined ``pbstring`` and never built the ``pb`` dict it returned.
    """
    # a) the input must be a string, b) it must have even length so letters pair up
    if not isinstance(A__, str):
        msg = f"Plugboard setting isn't type string ({type(A__)})"
        raise TypeError(msg)
    elif len(A__) % 2 != 0:
        msg = f"Odd number of symbols ({len(A__)})"
        raise Exception(msg)
    elif A__ == "":
        return {}

    # NOTE(review): the result of replace() is discarded, so spaces are not
    # actually stripped (they fail the alphabet check below instead). This
    # matches the original's behavior and is kept deliberately.
    A__.replace(" ", "")

    # Checks if all characters are unique and belong to the alphabet.
    tmppbl = set()
    for i in A__:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Build the symmetric mapping: each pair maps both ways.
    pb = {}
    for j in range(0, len(A__) - 1, 2):
        pb[A__[j]] = A__[j + 1]
        pb[A__[j + 1]] = A__[j]
    return pb
def _lowerCAmelCase(text, rotor_position, rotor_selection=(rotora, rotora, rotora), plugb=""):
    """Emulate an Enigma-style machine: encipher (or, symmetrically, decipher)
    ``text`` with the given rotor positions, rotor wirings and plugboard.

    Args:
        text: message to process; lower-cased letters are upper-cased, symbols
            outside the alphabet pass through unchanged.
        rotor_position: three 1-based rotor starting positions.
        rotor_selection: three rotor wiring strings.
        plugb: plugboard specification string.

    Returns:
        The processed string; running the output back through with the same
        settings recovers the input (the reflector makes the cipher symmetric).

    NOTE(review): depends on the module-level names ``abc``, ``reflector``,
    ``_validator`` and the rotor constants, all of which are currently
    shadowed by the ``a__`` / ``_lowerCAmelCase`` renames above and must be
    restored for this to run. The previous version declared all four
    parameters as ``A__`` (a SyntaxError) and collapsed the three rotor
    positions into a single variable.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorposa, rotorposb, rotorposc = rotor_position
    rotora, rotorb, rotorc = rotor_selection
    # Positions are supplied 1-based; work 0-based internally.
    rotorposa -= 1
    rotorposb -= 1
    rotorposc -= 1

    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # forward pass through the three rotors
            index = abc.index(symbol) + rotorposa
            symbol = rotora[index % len(abc)]
            index = abc.index(symbol) + rotorposb
            symbol = rotorb[index % len(abc)]
            index = abc.index(symbol) + rotorposc
            symbol = rotorc[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # backward pass through the rotors in reverse order
            symbol = abc[rotorc.index(symbol) - rotorposc]
            symbol = abc[rotorb.index(symbol) - rotorposb]
            symbol = abc[rotora.index(symbol) - rotorposa]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions: odometer-style carry into the next rotor
            rotorposa += 1
            if rotorposa >= len(abc):
                rotorposa = 0
                rotorposb += 1
            if rotorposb >= len(abc):
                rotorposb = 0
                rotorposc += 1
            if rotorposc >= len(abc):
                rotorposc = 0
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    # Round-trip demo: encrypt, then feed the ciphertext back through the
    # machine with identical settings to recover the plaintext.
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE(review): the rotor-wiring constants were all renamed to `a__` at
    # module level, so `rotora` does not resolve here; restore the constant
    # names before running this demo.
    rotor_sel = (rotora, rotora, rotora)
    en = _lowerCAmelCase(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", _lowerCAmelCase(en, rotor_pos, rotor_sel, pb))
| 719 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
    '''Unit tests for DDIMParallelScheduler.

    Covers config permutations, variance values and end-to-end sampling loops
    against golden sums/means.

    NOTE(review): automated renaming collapsed most locals into ``lowercase__``
    (e.g. ``config``, ``scheduler``, ``sample``, ``result_sum`` are read but
    never bound), most test methods share the name ``UpperCAmelCase`` (later
    defs shadow earlier ones), and the golden values embed thousands
    separators as ``_``. Code left byte-identical; a coordinated rename is
    needed before these tests can run.
    '''
    A : str = (DDIMParallelScheduler,)
    A : Any = (("eta", 0.0), ("num_inference_steps", 50))
    def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
        """Return a default scheduler config, with keyword overrides applied."""
        lowercase__ = {
            'num_train_timesteps': 10_00,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**lowerCAmelCase)
        return config
    def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
        """Run a full 10-step sampling loop and return the final sample."""
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
        lowercase__ = scheduler_class(**lowerCAmelCase)
        lowercase__, lowercase__ = 10, 0.0
        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase)
        for t in scheduler.timesteps:
            lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
            lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
        return sample
    def UpperCAmelCase ( self : Tuple) -> int:
        """Exercise several num_train_timesteps values."""
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase)
    def UpperCAmelCase ( self : Tuple) -> Any:
        """Exercise steps_offset, and pin the timestep schedule for offset=1."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCAmelCase)
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(steps_offset=1)
        lowercase__ = scheduler_class(**lowerCAmelCase)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
    def UpperCAmelCase ( self : str) -> Tuple:
        """Exercise beta_start/beta_end pairs."""
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
    def UpperCAmelCase ( self : Optional[int]) -> str:
        """Exercise both beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase)
    def UpperCAmelCase ( self : List[str]) -> List[str]:
        """Exercise both prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase)
    def UpperCAmelCase ( self : List[Any]) -> str:
        """Exercise clip_sample on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowerCAmelCase)
    def UpperCAmelCase ( self : Optional[int]) -> int:
        """Exercise both timestep spacings."""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=lowerCAmelCase)
    def UpperCAmelCase ( self : Any) -> List[str]:
        """Exercise rescale_betas_zero_snr on/off."""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
    def UpperCAmelCase ( self : List[str]) -> Optional[int]:
        """Exercise thresholding with several thresholds and prediction types."""
        self.check_over_configs(thresholding=lowerCAmelCase)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
    def UpperCAmelCase ( self : int) -> Optional[Any]:
        """Exercise individual timesteps on the forward path."""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=lowerCAmelCase)
    def UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Exercise (timestep, num_inference_steps) combinations."""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
            self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
    def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Exercise (timestep, eta) combinations."""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
    def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Pin _get_variance against golden values at several timestep pairs."""
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config()
        lowercase__ = scheduler_class(**lowerCAmelCase)
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
    def UpperCAmelCase ( self : Dict) -> Tuple:
        """Pin batch_step_no_noise on a stacked batch of three shifted samples."""
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config()
        lowercase__ = scheduler_class(**lowerCAmelCase)
        lowercase__, lowercase__ = 10, 0.0
        scheduler.set_timesteps(lowerCAmelCase)
        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter
        lowercase__ = self.dummy_sample_deter + 0.1
        lowercase__ = self.dummy_sample_deter - 0.1
        lowercase__ = samplea.shape[0]
        lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
        lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
        lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
        lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
        lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
        lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
        assert abs(result_sum.item() - 11_47.79_04) < 1E-2
        assert abs(result_mean.item() - 0.49_82) < 1E-3
    def UpperCAmelCase ( self : Any) -> int:
        """Pin the default full loop against golden sum/mean."""
        lowercase__ = self.full_loop()
        lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
        lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
        assert abs(result_sum.item() - 1_72.00_67) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67) < 1E-3
    def UpperCAmelCase ( self : int) -> List[Any]:
        """Pin the v_prediction full loop against golden sum/mean."""
        lowercase__ = self.full_loop(prediction_type='v_prediction')
        lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
        lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
        assert abs(result_sum.item() - 52.53_02) < 1E-2
        assert abs(result_mean.item() - 0.06_84) < 1E-3
    def UpperCAmelCase ( self : str) -> Dict:
        """Pin the full loop with set_alpha_to_one and a larger beta_start."""
        lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
        lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
        lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
        assert abs(result_sum.item() - 1_49.82_95) < 1E-2
        assert abs(result_mean.item() - 0.19_51) < 1E-3
    def UpperCAmelCase ( self : str) -> List[Any]:
        """Companion golden values for the other set_alpha_to_one setting."""
        lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
        lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
        lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
        assert abs(result_sum.item() - 1_49.07_84) < 1E-2
        assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-wide logger for this configuration file.
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
# NOTE(review): this rebinds the same (mangled) name as the logger above, so
# the logger object is lost — the two constants presumably had distinct names
# (logger / VIT_PRETRAINED_CONFIG_ARCHIVE_MAP); confirm intended names.
_UpperCAmelCase : List[str] = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for a ViT (Vision Transformer) model.

    Defaults reproduce the ViT-base architecture (google/vit-base-patch16-224).

    NOTE(review): the original base name `_UpperCamelCase` is undefined in this
    module; `PretrainedConfig` (imported above) is the intended base class.
    """

    # `model_type` is the key PretrainedConfig uses to register this config
    # (restored from a mangled class-attribute name).
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,  # dimensionality of the encoder layers and pooler
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,  # feed-forward layer width
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,  # add bias to the query/key/value projections
        encoder_stride=16,  # upscale factor for the masked-image-modeling decoder
        **kwargs,
    ) -> None:
        # Parameter names restored: the mangled original reused one name for
        # every parameter (a SyntaxError) while the assignments below still
        # read the original names.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class lowercase_ ( OnnxConfig ):
    """ONNX export configuration for ViT models.

    NOTE(review): base restored from the undefined `_UpperCamelCase` to
    `OnnxConfig` (imported above), matching the inputs/atol contract below.
    The two properties were mangled to one colliding name; `inputs` and
    `atol_for_validation` are the names the OnnxConfig API requires.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named model inputs with the symbolic meaning of each axis."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 107 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowercase_ :
    """Builds tiny DPR configs and dummy inputs, and checks TF-DPR model outputs.

    NOTE(review): the test class below refers to this helper as
    ``TFDPRModelTester``; this class's name appears mangled — confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        # Attribute assignments restored: the mangled original bound every value
        # to one throwaway local, so the `self.*` reads below would fail.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Create a small DPRConfig plus random input tensors for one batch.

        Local names reconstructed from the return statement, which the
        mangling left intact.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): value mangled in the original; encoder models are non-decoder — confirm
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run TFDPRContextEncoder under all call signatures and check shapes."""
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run TFDPRQuestionEncoder under all call signatures and check shapes."""
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run TFDPRReader and check start/end/relevance logits shapes."""
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for TFModelTesterMixin: (config, inputs_dict) pair."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class lowercase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-tester harness for the TF DPR encoder/reader classes.

    NOTE(review): the two mixin bases were mangled to an undefined name; the
    imports at the top of this file make TFModelTesterMixin and
    PipelineTesterMixin the intended bases. Class attributes and test-method
    names were mangled into colliding names and are restored to the
    transformers test conventions — confirm against upstream.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    # Features not supported / not meaningful for DPR models.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): `TFDPRModelTester` is the conventional name of the helper
        # class defined above; this file's mangling renamed that class, so this
        # binding must be confirmed/realigned with the helper's actual name.
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test from_pretrained for each archive list (first entry only)."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Integration test: TFDPRQuestionEncoder embeddings vs. known values."""

    @slow
    def test_inference_no_head(self):
        # NOTE(review): renamed from a mangled non-`test_` name so unittest
        # discovers it; locals restored (the original referenced undefined
        # `output`/`expected_slice` after clobbering one throwaway local).
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 107 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module logger — `save_vocabulary` in the class below calls `logger.error`,
# so this constant (mangled in the original) must be named `logger`.
logger = logging.get_logger(__name__)

# The class attributes below reference VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, so
# these mangled repeated assignments are restored to those names.
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# Segments (not really needed)
# NOTE(review): the five small ints were mangled to one name; these segment-id
# constant names follow the upstream XLNet tokenizer — confirm.
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    """SentencePiece-based XLNet-style tokenizer.

    NOTE(review): the original base expression resolved to a mangled module
    constant (an int), which cannot be a base class; `PreTrainedTokenizer`
    (imported above) is the intended base. Duplicated parameter names (a
    SyntaxError in the original) and colliding method names are restored to
    the PreTrainedTokenizer API names they override.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # NOTE(review): the original assigned a bare `3` to a throwaway local;
        # `_pad_token_type_id = 3` matches the upstream XLNet tokenizer — confirm.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on
        # __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and case before tokenizing."""
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("""``""", """\"""").replace("""''""", """\"""")

        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""", outputs)
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, splitting trailing digit-commas."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
                # e.g. "9,": re-encode "9" alone so the comma becomes its own piece.
                # NOTE(review): the replace target was mangled; SPIECE_UNDERLINE
                # (imported above) is the intended sentinel — confirm.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, """"""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Map a piece (str) to its SentencePiece id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map a SentencePiece id to its piece (str)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        # NOTE(review): the replace target was mangled; SPIECE_UNDERLINE is the
        # intended sentinel — confirm.
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ):
        """Decode ids to text, keeping added tokens as separate sub-texts."""
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = """""".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 547 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-wide logger. NOTE(review): the name was mangled by obfuscation and the
# logger is not referenced in the visible part of this file — confirm intended name.
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    """Image processor implementing a resize -> center-crop -> rescale ->
    normalize pipeline.

    NOTE(review): the original base expression resolved to the mangled module
    logger, which cannot be a base class; `BaseImageProcessor` (imported
    above) is the intended base. Duplicated parameter names (a SyntaxError in
    the original) and colliding method names are restored — the method names
    are grounded by the `self.resize`/`self.center_crop`/`self.rescale`/
    `self.normalize` calls inside `preprocess`.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` (a {"height", "width"} dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["""height"""], size["""width"""]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` (a {"height", "width"} dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch of images.

        Any argument left as None falls back to the instance default.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        # Fixed precedence bug: the original `do_resize and size is None or
        # resample is None` raised even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 547 | 1 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _snake_case :
def __init__( self : Tuple ):
__lowerCamelCase : Dict = {}
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str]=1 ):
if self.graph.get(_A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__lowerCamelCase : Tuple = [[w, v]]
if not self.graph.get(_A ):
__lowerCamelCase : List[Any] = []
def lowerCamelCase__ ( self : Dict ):
return list(self.graph )
def lowerCamelCase__ ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any]=-2 , UpperCAmelCase : Optional[int]=-1 ):
if s == d:
return []
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : int = []
if s == -2:
__lowerCamelCase : Tuple = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : List[Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
__lowerCamelCase : Dict = stack[len(_A ) - 1]
else:
__lowerCamelCase : str = ss
# check if se have reached the starting point
if len(_A ) == 0:
return visited
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int=-1 ):
if c == -1:
__lowerCamelCase : Tuple = floor(random() * 10000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__lowerCamelCase : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[str]=-2 ):
__lowerCamelCase : int = deque()
__lowerCamelCase : Optional[int] = []
if s == -2:
__lowerCamelCase : Union[str, Any] = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
__lowerCamelCase : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : int , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Union[str, Any] ):
return len(self.graph[u] )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[Any]=-2 ):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[Any] = []
if s == -2:
__lowerCamelCase : List[str] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : Any = s
__lowerCamelCase : Any = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_A ) != 0:
__lowerCamelCase : Union[str, Any] = stack[len(_A ) - 1]
else:
__lowerCamelCase : Union[str, Any] = ss
# check if se have reached the starting point
if len(_A ) == 0:
return sorted_nodes
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Any = []
__lowerCamelCase : List[str] = []
__lowerCamelCase : Union[str, Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : int = -2
__lowerCamelCase : List[str] = []
__lowerCamelCase : Optional[int] = s
__lowerCamelCase : Any = False
__lowerCamelCase : int = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase : Dict = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase : Union[str, Any] = True
if len(_A ) != 0:
__lowerCamelCase : List[str] = stack[len(_A ) - 1]
else:
__lowerCamelCase : Optional[Any] = False
indirect_parents.append(_A )
__lowerCamelCase : Tuple = s
__lowerCamelCase : Tuple = ss
# check if se have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Tuple = []
__lowerCamelCase : Dict = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : Union[str, Any] = -2
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[Any] = s
__lowerCamelCase : str = False
__lowerCamelCase : str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase : List[str] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase : Union[str, Any] = True
if len(_A ) != 0:
__lowerCamelCase : int = stack[len(_A ) - 1]
else:
__lowerCamelCase : Dict = False
indirect_parents.append(_A )
__lowerCamelCase : int = s
__lowerCamelCase : Optional[int] = ss
# check if se have reached the starting point
if len(_A ) == 0:
return False
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Any=-2 , UpperCAmelCase : List[str]=-1 ):
__lowerCamelCase : Any = time()
self.dfs(_A , _A )
__lowerCamelCase : Union[str, Any] = time()
return end - begin
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Optional[int]=-2 ):
__lowerCamelCase : Dict = time()
self.bfs(_A )
__lowerCamelCase : Any = time()
return end - begin
class _snake_case :
def __init__( self : List[str] ):
__lowerCamelCase : Optional[int] = {}
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=1 ):
if self.graph.get(_A ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__lowerCamelCase : Any = [[w, v]]
# add the other way
if self.graph.get(_A ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
__lowerCamelCase : List[str] = [[w, u]]
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict ):
if self.graph.get(_A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_A )
# the other way round
if self.graph.get(_A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_A )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : int=-2 , UpperCAmelCase : Optional[int]=-1 ):
if s == d:
return []
__lowerCamelCase : int = []
__lowerCamelCase : Dict = []
if s == -2:
__lowerCamelCase : Optional[Any] = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : Optional[Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_A ) != 0:
__lowerCamelCase : str = stack[len(_A ) - 1]
else:
__lowerCamelCase : Optional[Any] = ss
# check if se have reached the starting point
if len(_A ) == 0:
return visited
def lowerCamelCase__ ( self : str , UpperCAmelCase : Tuple=-1 ):
if c == -1:
__lowerCamelCase : str = floor(random() * 10000 ) + 10
for i in range(_A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__lowerCamelCase : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(_A , _A , 1 )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[str]=-2 ):
__lowerCamelCase : Optional[Any] = deque()
__lowerCamelCase : Optional[int] = []
if s == -2:
__lowerCamelCase : int = list(self.graph )[0]
d.append(_A )
visited.append(_A )
while d:
__lowerCamelCase : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int ):
return len(self.graph[u] )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Dict = []
__lowerCamelCase : str = []
__lowerCamelCase : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : Union[str, Any] = -2
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : List[Any] = s
__lowerCamelCase : Any = False
__lowerCamelCase : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase : Tuple = len(_A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase : Dict = True
if len(_A ) != 0:
__lowerCamelCase : Optional[int] = stack[len(_A ) - 1]
else:
__lowerCamelCase : Optional[int] = False
indirect_parents.append(_A )
__lowerCamelCase : Optional[int] = s
__lowerCamelCase : Dict = ss
# check if se have reached the starting point
if len(_A ) == 0:
return list(_A )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[Any] = []
__lowerCamelCase : str = []
__lowerCamelCase : Any = list(self.graph )[0]
stack.append(_A )
visited.append(_A )
__lowerCamelCase : Dict = -2
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : Optional[Any] = s
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__lowerCamelCase : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__lowerCamelCase : Union[str, Any] = len(_A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__lowerCamelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCamelCase : List[str] = True
if len(_A ) != 0:
__lowerCamelCase : Any = stack[len(_A ) - 1]
else:
__lowerCamelCase : Optional[Any] = False
indirect_parents.append(_A )
__lowerCamelCase : List[str] = s
__lowerCamelCase : Optional[Any] = ss
# check if se have reached the starting point
if len(_A ) == 0:
return False
def lowerCamelCase__ ( self : int ):
return list(self.graph )
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Any=-2 , UpperCAmelCase : List[str]=-1 ):
__lowerCamelCase : Union[str, Any] = time()
self.dfs(_A , _A )
__lowerCamelCase : Tuple = time()
return end - begin
def lowerCamelCase__ ( self : int , UpperCAmelCase : str=-2 ):
__lowerCamelCase : Tuple = time()
self.bfs(_A )
__lowerCamelCase : int = time()
return end - begin | 646 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a__ ( ) -> List[str]:
UpperCAmelCase__ : int = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[Any] = g.get_repo('''huggingface/transformers''' )
UpperCAmelCase__ : List[str] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 75 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _UpperCAmelCase ( _snake_case):
def lowerCamelCase__ ( self , snake_case_ ):
return 0.0
def a__ ( a : np.ndarray , a : int ):
"""simple docstring"""
_snake_case : Any = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_snake_case : Union[str, Any] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def a__ ( a : FilterType , a : int ):
"""simple docstring"""
_snake_case : List[Any] = 512
_snake_case : Dict = [1] + [0] * (size - 1)
_snake_case : Union[str, Any] = [filter_type.process(a ) for item in inputs]
_snake_case : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case : List[Any] = np.abs(np.fft.fft(a ) )
_snake_case : List[Any] = 20 * np.logaa(a )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_snake_case : Tuple = get_bounds(a , a )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(a )
plt.show()
def a__ ( a : FilterType , a : int ):
"""simple docstring"""
_snake_case : str = 512
_snake_case : Optional[Any] = [1] + [0] * (size - 1)
_snake_case : Optional[Any] = [filter_type.process(a ) for item in inputs]
_snake_case : Union[str, Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_snake_case : Dict = np.angle(np.fft.fft(a ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(a , -2 * pi ) )
plt.show()
| 715 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase):
__lowercase : Dict = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowercase : Optional[Any] = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Optional[int] = False
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ):
_snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class in get_values(snake_case_ ):
_snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_snake_case : Optional[Any] = parent
_snake_case : List[Any] = batch_size
_snake_case : Optional[int] = seq_length
_snake_case : Dict = is_training
_snake_case : Union[str, Any] = use_input_mask
_snake_case : List[Any] = use_token_type_ids
_snake_case : int = use_labels
_snake_case : Dict = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Tuple = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Any = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : List[Any] = num_labels
_snake_case : Optional[int] = num_choices
_snake_case : Optional[int] = scope
_snake_case : Any = embedding_size
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Optional[Any] = None
if self.use_input_mask:
_snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[str] = None
if self.use_token_type_ids:
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Dict = None
_snake_case : Tuple = None
_snake_case : str = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Tuple = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Dict = TFMobileBertModel(config=snake_case_ )
_snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Optional[int] = model(snake_case_ )
_snake_case : Union[str, Any] = [input_ids, input_mask]
_snake_case : Optional[Any] = model(snake_case_ )
_snake_case : Dict = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ )
_snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[str] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ )
_snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Tuple = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = TFMobileBertForPreTraining(config=snake_case_ )
_snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[Any] = model(snake_case_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = self.num_labels
_snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ )
_snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Any = self.num_choices
_snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ )
_snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_snake_case : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Union[str, Any] = self.num_labels
_snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ )
_snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ )
_snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[Any] = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Tuple = config_and_inputs
_snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def lowerCamelCase__ ( self ):
_snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
_snake_case : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )
@slow
def lowerCamelCase__ ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase):
@slow
def lowerCamelCase__ ( self ):
_snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
_snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
_snake_case : Union[str, Any] = model(snake_case_ )[0]
_snake_case : int = [1, 6, 3_05_22]
self.assertEqual(output.shape , snake_case_ )
_snake_case : Optional[Any] = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 87 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]=7 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[int]=18 , lowerCAmelCase : List[Any]=30 , lowerCAmelCase : str=4_00 , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , ) -> Any:
"""simple docstring"""
lowercase__ = size if size is not None else {'height': 18, 'width': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Any = DPTImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
lowercase__ = DPTImageProcessingTester(self)
@property
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'image_mean'))
self.assertTrue(hasattr(lowerCAmelCase , 'image_std'))
self.assertTrue(hasattr(lowerCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(lowerCAmelCase , 'do_resize'))
self.assertTrue(hasattr(lowerCAmelCase , 'size'))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image)
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray)
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor)
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 622 |
'''simple docstring'''
def lowerCamelCase__ ( A : int = 50 ):
'''simple docstring'''
UpperCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 210 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
__magic_name__ = (DDPMScheduler,)
def _lowerCamelCase ( self , **UpperCAmelCase__ ) -> Any:
_A : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__ )
return config
def _lowerCamelCase ( self ) -> List[Any]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> List[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> List[Any]:
self.check_over_configs(thresholding=UpperCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def _lowerCamelCase ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> int:
_A : List[str] = self.scheduler_classes[0]
_A : Any = self.get_scheduler_config()
_A : Any = scheduler_class(**UpperCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ) -> Dict:
_A : Dict = self.scheduler_classes[0]
_A : Tuple = self.get_scheduler_config()
_A : Dict = scheduler_class(**UpperCAmelCase__ )
_A : Any = len(UpperCAmelCase__ )
_A : Union[str, Any] = self.dummy_model()
_A : Dict = self.dummy_sample_deter
_A : int = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
_A : Dict = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
_A : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A : int = pred_prev_sample
_A : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase__ ) )
_A : List[str] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ) -> str:
_A : int = self.scheduler_classes[0]
_A : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A : List[str] = scheduler_class(**UpperCAmelCase__ )
_A : int = len(UpperCAmelCase__ )
_A : str = self.dummy_model()
_A : Optional[int] = self.dummy_sample_deter
_A : List[str] = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase__ ) ):
# 1. predict noise residual
_A : Optional[int] = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict previous mean of sample x_t-1
_A : str = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A : Union[str, Any] = pred_prev_sample
_A : Optional[Any] = torch.sum(torch.abs(UpperCAmelCase__ ) )
_A : int = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A : Tuple = self.scheduler_classes[0]
_A : Tuple = self.get_scheduler_config()
_A : Any = scheduler_class(**UpperCAmelCase__ )
_A : int = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
_A : Optional[Any] = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase__ ):
if i == len(UpperCAmelCase__ ) - 1:
_A : Any = -1
else:
_A : int = timesteps[i + 1]
_A : Optional[int] = scheduler.previous_timestep(UpperCAmelCase__ )
_A : List[Any] = prev_t.item()
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> List[Any]:
_A : Optional[int] = self.scheduler_classes[0]
_A : List[Any] = self.get_scheduler_config()
_A : Optional[int] = scheduler_class(**UpperCAmelCase__ )
_A : Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCAmelCase__ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : Tuple = self.get_scheduler_config()
_A : List[Any] = scheduler_class(**UpperCAmelCase__ )
_A : Any = [1_0_0, 8_7, 5_0, 1, 0]
_A : Tuple = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[Any]:
_A : Dict = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Any = scheduler_class(**UpperCAmelCase__ )
_A : Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
| 705 |
'''simple docstring'''
import socket
def lowercase():
    """Connect to a local file server, send a greeting, and stream the
    response into ``Received_file``.

    Fixes: the receive loop wrote an undefined name (`lowerCAmelCase`)
    instead of the received chunk, and the ``__main__`` guard called an
    undefined ``main()``.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    lowercase()
| 417 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __UpperCamelCase ( lowerCAmelCase__ : int ):
    """Return the argument plus two (the `add_two` tool used by the tests below).

    Fixes the body, which returned an undefined name `x` instead of the
    parameter; the annotation is corrected from `str` to `int` to match the
    arithmetic.
    """
    return lowerCAmelCase__ + 2
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the restricted Python interpreter `evaluate`.

    NOTE(review): in the original, every method was named `lowerCAmelCase`
    (so later defs shadowed earlier ones and, lacking a `test_` prefix, none
    were collected), and call sites referenced undefined names `snake_case_`
    and `add_two`. Distinct `test_*` names, real locals, and a static
    `add_two` stand-in are restored here.
    """

    @staticmethod
    def _add_two(x):
        # Stand-in for the module-level `add_two` tool, whose name was mangled.
        return x + 2

    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": self._add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": self._add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": self._add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": self._add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": self._add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 521 |
from maths.prime_check import is_prime
def __UpperCamelCase ( lowerCAmelCase__ : int ):
    """Return n + 2 when both n and n + 2 are prime (twin primes), else -1.

    Fixes: the original type-check was `isinstance(n, n)` (a TypeError) and
    the body referenced an undefined name `number` instead of the parameter.

    Raises:
        TypeError: if the input is not an `int`.
    """
    if not isinstance(lowerCAmelCase__, int):
        msg = f"Input value of [number={lowerCAmelCase__}] must be an integer"
        raise TypeError(msg)
    if is_prime(lowerCAmelCase__) and is_prime(lowerCAmelCase__ + 2):
        return lowerCAmelCase__ + 2
    return -1
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 521 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure: submodule name -> list of public symbols.
_snake_case = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: register the modeling symbols.  (The original
    # rebound `_snake_case` to a bare list here — discarding the structure
    # dict — and later referenced an undefined `_import_structure`.)
    _snake_case["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on
    # demand.  (The original assigned the proxy to a throwaway variable,
    # so the lazy replacement never happened.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _snake_case, module_spec=__spec__)
| 712 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
    """Return the factorial of ``_a``.

    Fixes the body, which multiplied into an undefined name `fact` while the
    initial 1 was bound to a throwaway local.
    """
    fact = 1
    for i in range(1, _a + 1):
        fact *= i
    return fact
def snake_case ( _a: int )-> int:
    """Return the sum of the decimal digits of ``_a`` (assumed non-negative).

    Fixes the body, which looped over undefined names (`number`,
    `sum_of_digits`) instead of the parameter.
    """
    remaining = _a
    total = 0
    while remaining > 0:
        total += remaining % 10  # peel off the last digit
        remaining //= 10
    return total
def snake_case ( _a: int = 100 )-> int:
    """Project Euler 20: sum of the digits of ``_a`` factorial.

    The helpers this originally called (`factorial`, `split_and_add`) do not
    exist under those names in this module (they were mangled away), so the
    computation is inlined here; the behaviour is unchanged.
    """
    fact = 1
    for i in range(2, _a + 1):
        fact *= i
    # Digit sum via the decimal string representation.
    return sum(int(digit) for digit in str(fact))
if __name__ == "__main__":
    # The original called an undefined `solution`; the solver above is the
    # last definition bound to `snake_case`.
    print(snake_case(int(input("Enter the Number: ").strip())))
| 659 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase_ :
    """A simple real-valued vector.

    NOTE(review): in the original, every accessor was named `__a` (so later
    defs shadowed earlier ones), instance state was bound to throwaway locals
    instead of `self`, `isinstance(x, x)` calls crashed, and results were
    built via an undefined `Vector` name. The method names restored here
    (`component`, `change_component`, `copy`, `euclidean_length`, `angle`)
    are the ones the arithmetic dunders themselves call.
    """

    def __init__(self, lowerCamelCase__=None):
        """Store a copy of the given components (defaults to the empty vector)."""
        if lowerCamelCase__ is None:
            lowerCamelCase__ = []
        self.__components = list(lowerCamelCase__)

    def __len__(self):
        """Number of components."""
        return len(self.__components)

    def __str__(self):
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            summed = [self.__components[i] + other.component(i) for i in range(size)]
            return type(self)(summed)
        raise Exception("must have the same size")

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            diff = [self.__components[i] - other.component(i) for i in range(size)]
            return type(self)(diff)
        raise Exception("must have the same size")

    def __mul__(self, other):
        """Scalar multiplication (number operand) or dot product (vector operand)."""
        if isinstance(other, (float, int)):
            return type(self)([c * other for c in self.__components])
        if isinstance(other, type(self)) and len(self) == len(other):
            return sum(self.__components[i] * other.component(i) for i in range(len(self)))
        raise Exception("invalid operand!")

    def copy(self):
        """Return an independent copy of this vector."""
        return type(self)(self.__components)

    def component(self, i: int) -> float:
        """Return component *i* (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set component *pos* to *value*."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c ** 2 for c in self.__components))

    def angle(self, other, deg: bool = False) -> float:
        """Angle between this vector and *other*, in radians (degrees if *deg*)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)
def A ( lowercase__ : int ) -> Vector:
    """Return the zero vector with ``lowercase__`` components.

    NOTE(review): `Vector` is not defined in this module (the vector class's
    name was mangled to `lowerCAmelCase_`), so this call raises NameError at
    runtime — confirm the intended class name.
    """
    # The original checked isinstance(n, n), which raises TypeError; the
    # intended check is against `int`.
    assert isinstance(lowercase__, int)
    return Vector([0] * lowercase__)
def A ( dimension : int , pos : int ) -> Vector:
    """Return the ``pos``-th standard basis vector of the given dimension.

    The original reused one mangled name for both parameters (a SyntaxError);
    positional order (dimension, pos) is preserved.
    NOTE(review): `Vector` is undefined in this module (class name mangled) —
    confirm the intended class.
    """
    assert isinstance(dimension, int) and isinstance(pos, int)
    components = [0] * dimension
    components[pos] = 1
    return Vector(components)
def A ( scalar : float , x , y ):
    """Return ``x * scalar + y`` (the classic axpy operation).

    The original reused one mangled parameter name three times (a
    SyntaxError) and its isinstance checks compared values against
    themselves (TypeError). Only the scalar check is kept; the vector-type
    checks referenced the mangled-away `Vector` name.
    NOTE(review): confirm whether vector-type validation should be restored.
    """
    assert isinstance(scalar, (int, float))
    return x * scalar + y
def A ( n : int , a : int , b : int ) -> Vector:
    """Return a vector of ``n`` random integers drawn from [a, b].

    The original reused one mangled name for all three parameters (a
    SyntaxError).
    NOTE(review): which parameter seeded the RNG is ambiguous in the mangled
    source; ``n`` is assumed — confirm. `Vector` is also undefined in this
    module (class name mangled).
    """
    random.seed(n)
    return Vector([random.randint(a, b) for _ in range(n)])
class lowerCAmelCase_ :
    """A real matrix stored as a list of rows.

    NOTE(review): in the original, `__init__` reused one mangled name for all
    three parameters (a SyntaxError), instance state was bound to throwaway
    locals instead of `self`, the accessors were all named `__a` (shadowing
    each other), and results referenced undefined `Matrix`/`zero_vector`
    names. The method names restored here (`width`, `height`, `component`,
    `change_component`, `minor`, `cofactor`, `determinant`) are the ones the
    class's own dunders call.
    """

    def __init__(self, matrix, w, h):
        """Store the row list *matrix* with *w* columns and *h* rows."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        """Render each row as ``|a,b,...|`` on its own line."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        """Entry-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            rows = [
                [self.__matrix[i][j] + other.component(i, j) for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return type(self)(rows, self.__width, self.__height)
        raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        """Entry-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            rows = [
                [self.__matrix[i][j] - other.component(i, j) for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return type(self)(rows, self.__width, self.__height)
        raise Exception("matrices must have the same dimension!")

    def __mul__(self, other):
        """Matrix-scalar product (number operand) or matrix-vector product."""
        if isinstance(other, (int, float)):  # matrix-scalar
            rows = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return type(self)(rows, self.__width, self.__height)
        # Otherwise treat the operand as a vector exposing __len__/component().
        if len(other) == self.__width:
            sums = [
                sum(self.__matrix[i][j] * other.component(j) for j in range(self.__width))
                for i in range(self.__height)
            ]
            return type(other)(sums)
        raise Exception(
            "vector must have the same size as the "
            "number of columns of the matrix!"
        )

    def height(self) -> int:
        """Number of rows."""
        return self.__height

    def width(self) -> int:
        """Number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to *value*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the matrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        rows = self.__matrix[:x] + self.__matrix[x + 1 :]
        reduced = [row[:y] + row[y + 1 :] for row in rows]
        return type(self)(reduced, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor at (x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        if self.__height == 1:
            return self.__matrix[0][0]
        if self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        return sum(
            self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
        )
def A ( lowercase__ : int ) -> Matrix:
    """Return the n x n zero matrix.

    Fixes the body, which sized the rows with an undefined name `n` instead
    of the parameter, and constructs the matrix class defined above (at
    runtime `lowerCAmelCase_` is bound to it; the original referenced an
    undefined `Matrix` name).
    """
    rows: list[list[float]] = [[0] * lowercase__ for _ in range(lowercase__)]
    return lowerCAmelCase_(rows, lowercase__, lowercase__)
def A ( width : int , height : int , a : int , b : int ) -> Matrix:
    """Return a ``height`` x ``width`` matrix of random integers from [a, b].

    The original reused one mangled name for all four parameters (a
    SyntaxError) and referenced an undefined `Matrix` name; the matrix class
    defined above is used instead (bound to `lowerCAmelCase_` at runtime).
    NOTE(review): the RNG seed source is ambiguous in the mangled source;
    the first parameter is assumed — confirm.
    """
    random.seed(width)
    rows = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return lowerCAmelCase_(rows, width, height)
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
# Module-level logger for the audiofolder builder.
lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)
# BuilderConfig for the audio folder-based builder.
# NOTE(review): both fields below were mangled onto the same name
# `__lowerCAmelCase` (conventionally `drop_labels` / `drop_metadata` in this
# builder family), so the second annotation shadows the first — confirm the
# intended field names.
class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
    __lowerCAmelCase : bool = None
    __lowerCAmelCase : bool = None
# Folder-based dataset builder for audio files.
# NOTE(review): all five class attributes were mangled onto the same name
# `__lowerCAmelCase` (originally BASE_FEATURE / BASE_COLUMN_NAME /
# BUILDER_CONFIG_CLASS / EXTENSIONS / CLASSIFICATION_TASK in this builder
# family), so only the last assignment survives; `AudioFolderConfig` is also
# not defined under that name in this module — confirm before relying on it.
class _snake_case ( folder_based_builder.FolderBasedBuilder ):
    __lowerCAmelCase : Optional[Any] = datasets.Audio()
    __lowerCAmelCase : Union[str, Any] = 'audio'
    __lowerCAmelCase : str = AudioFolderConfig
    __lowerCAmelCase : List[str] # definition at the bottom of the script
    __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )
# Audio file extensions recognised by the AudioFolder loader.
# NOTE(review): the original block ended with
# `lowerCamelCase__ : int = AUDIO_EXTENSIONS`, but `AUDIO_EXTENSIONS` is never
# defined in this module (its name was mangled into `lowerCamelCase__`
# itself), so that trailing line raised NameError and has been dropped; the
# intended effect was to expose this list via the builder's EXTENSIONS
# attribute — confirm.  The wrong `int` annotation is removed as well.
lowerCamelCase__ = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
| 12 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **metrics_kwargs):
    """Compute ROUGE between the prediction and target line files.

    Fixes: the original signature reused the parameter name `a` three times
    (a SyntaxError), the body referenced an undefined `_lowerCAmelCase`, and
    the ``__main__`` guard invoked `calculate_rouge_path`, which did not
    exist; the function is restored under that name.

    Args:
        pred_path: file with one prediction per line.
        tgt_path: file with one reference per line (truncated to the number
            of predictions).
        save_path: optional path; when given, the metrics dict is saved as JSON.
        **metrics_kwargs: forwarded to `calculate_rouge`.
    """
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **metrics_kwargs)
    if save_path is not None:
        # NOTE(review): the original `indent` argument was mangled; None
        # (compact JSON) is assumed — confirm.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 701 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
    """Config holder that generates settings for MobileViT image-processor tests.

    NOTE(review): the original `__init__` reused one mangled parameter name
    for every argument (a SyntaxError) and bound the values to throwaway
    locals instead of `self`; conventional attribute names are restored from
    the defaults, and the dict method is renamed to
    `prepare_image_processor_dict`, the name its consumer calls.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for MobileViTImageProcessor.

    NOTE(review): the original base-class reference was mangled to an
    undefined `lowercase`; it is restored to the mixin imported at the top of
    this file.  Locals referenced an undefined `UpperCAmelCase` and all
    methods shared one name (shadowing each other); conventional names are
    restored.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): `MobileViTImageProcessingTester` is not defined under
        # this name in this module (the tester class's name was mangled) —
        # confirm the intended class.
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 140 | 0 |
'''simple docstring'''
def lowerCamelCase ( ) -> int:
    """Project Euler 19: count the Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Restored: the original bound every local to one throwaway name, so the
    loop body referenced undefined `day`/`month`/`year`/`sundays`; the
    ``__main__`` guard also called an undefined `solution`.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 1 Jan 1901 was a Sunday offset 6 days after 1 Jan 1900 (Monday)
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # advance one week at a time
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(lowerCamelCase())
| 209 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : int = 1_000_000 ) -> int:
    """Project Euler 72: number of reduced proper fractions with denominator
    up to the limit, via a sieve over Euler's totient.

    Restored: the original bound every local to one throwaway name, so the
    body referenced undefined `primes`/`phi`; the ``__main__`` guard also
    called an undefined `solution`.
    """
    # Sieve of odd primes (plus 2) below the limit.
    primes = set(range(3, lowerCamelCase__, 2))
    primes.add(2)
    for p in range(3, lowerCamelCase__, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, lowerCamelCase__, p)))

    # phi[n] starts at n and is scaled by (1 - 1/p) for each prime factor p.
    phi = [float(n) for n in range(lowerCamelCase__ + 1)]

    for p in primes:
        for n in range(p, lowerCamelCase__ + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f'{_snake_case() = }')
| 153 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def _lowerCamelCase ( __A : np.ndarray ) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array.

    Fixes the body, which referenced an undefined name `input_array` instead
    of the parameter.
    """
    return __A.reshape((__A.size, 1))
def _lowerCamelCase ( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    """Average within-class scatter matrix of (n_features, n_samples) data.

    Restored distinct parameter names (the original reused one mangled name
    for all three — a SyntaxError) and inlined the `column_reshape` helper,
    whose module-level name was mangled away.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - data_mean.reshape((data_mean.size, 1))
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def _lowerCamelCase ( features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    """Average between-class scatter matrix of (n_features, n_samples) data.

    Restored distinct parameter names (the original reused one mangled name
    for all three — a SyntaxError) and inlined the `column_reshape` helper,
    whose module-level name was mangled away.
    """
    general_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        diff = data_mean.reshape((data_mean.size, 1)) - general_mean.reshape((general_mean.size, 1))
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(diff, diff.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(diff, diff.T)
    return covariance_sum / features.shape[1]
def _lowerCamelCase ( features : np.ndarray , dimensions : int ) -> np.ndarray:
    """Principal Component Analysis: project (n_features, n_samples) data onto
    the top `dimensions` principal components.

    Restored distinct parameter names (the original reused one mangled name
    for both — a SyntaxError).

    Raises:
        AssertionError: if the dataset is empty.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError
def _lowerCamelCase ( features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ) -> np.ndarray:
    """Linear Discriminant Analysis: project (n_features, n_samples) data onto
    `dimensions` discriminant directions.

    Restored distinct parameter names (the original reused one mangled name
    for all four — a SyntaxError).  The scatter-matrix helpers are defined
    locally because the module-level helpers' names were mangled away.  Also
    fixes `if features.any:` (attribute, always truthy) to an actual call.
    """
    assert classes > dimensions

    def _col(v):
        # column-vector view of a 1-D array
        return v.reshape((v.size, 1))

    def _cov_within(f, l, c):
        # within-class scatter, averaged over all samples
        cov_sum = np.nan
        for i in range(c):
            data = f[:, l == i]
            centered = data - _col(data.mean(1))
            cov_sum = np.dot(centered, centered.T) if i == 0 else cov_sum + np.dot(centered, centered.T)
        return cov_sum / f.shape[1]

    def _cov_between(f, l, c):
        # between-class scatter, averaged over all samples
        general_mean = f.mean(1)
        cov_sum = np.nan
        for i in range(c):
            data = f[:, l == i]
            n_i = data.shape[1]
            diff = _col(data.mean(1)) - _col(general_mean)
            cov_sum = n_i * np.dot(diff, diff.T) if i == 0 else cov_sum + n_i * np.dot(diff, diff.T)
        return cov_sum / f.shape[1]

    # Check if features have been already loaded
    if features.any():
        # Generalized eigenproblem: between-class vs within-class scatter.
        _, eigenvectors = eigh(
            _cov_between(features, labels, classes),
            _cov_within(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
    logging.error("Dataset empty")
    raise AssertionError
def _lowerCamelCase ( ) -> None:
    """LDA must raise AssertionError when dimensions > classes.

    Fixes `pytest.raises(__A)` / `isinstance(__A, ...)` (undefined names) to
    use AssertionError and the computed result.
    NOTE(review): `linear_discriminant_analysis` is not defined under this
    name in this module (all functions were mangled onto one name) — confirm
    the intended target.
    """
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def _lowerCamelCase ( ) -> None:
    """PCA result-mismatch check must surface as AssertionError.

    Fixes `pytest.raises(__A)` / `np.allclose(__A, __A)` (undefined names) to
    use AssertionError and the computed/expected arrays.
    NOTE(review): `principal_component_analysis` is not defined under this
    name in this module (all functions were mangled onto one name) — confirm
    the intended target.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 186 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict.

    Restored distinct parameter names (the original reused one mangled name
    for all three — a SyntaxError); positional order matches the call site
    below (tf_checkpoint_path, rembert_config_file, pytorch_dump_path).
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Restored: the original created the parser under a throwaway name but
    # then used undefined `parser`/`args` and called an undefined
    # `convert_rembert_tf_checkpoint_to_pytorch`.
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    SCREAMING_SNAKE_CASE.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    SCREAMING_SNAKE_CASE.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    SCREAMING_SNAKE_CASE.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = SCREAMING_SNAKE_CASE.parse_args()
    _lowerCamelCase(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 186 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = '''visual_bert'''
def __init__( self : List[Any] , UpperCAmelCase__ : str=30522 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Union[str, Any]=3072 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Optional[int]=0.0_2 , UpperCAmelCase__ : List[Any]=1E-12 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]=2 , **UpperCAmelCase__ : int , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_a : List[str] = vocab_size
_a : List[Any] = max_position_embeddings
_a : Tuple = hidden_size
_a : List[str] = visual_embedding_dim
_a : Optional[int] = num_hidden_layers
_a : List[str] = num_attention_heads
_a : str = intermediate_size
_a : int = hidden_act
_a : Any = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : List[Any] = initializer_range
_a : Union[str, Any] = type_vocab_size
_a : int = layer_norm_eps
_a : List[Any] = bypass_transformer
_a : List[str] = special_visual_initialize
| 389 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : str=[1, 2, 1] , UpperCAmelCase__ : Union[str, Any]=[2, 2, 4] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=2.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=0.0_2 , UpperCAmelCase__ : List[str]=1E-5 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCAmelCase__ : Union[str, Any]=[1, 2, 3] , ) -> str:
_a : Union[str, Any] = parent
_a : str = batch_size
_a : int = image_size
_a : Optional[Any] = patch_size
_a : Tuple = num_channels
_a : str = embed_dim
_a : int = depths
_a : List[Any] = num_heads
_a : int = window_size
_a : Optional[int] = mlp_ratio
_a : Optional[int] = qkv_bias
_a : Dict = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Tuple = drop_path_rate
_a : List[Any] = hidden_act
_a : List[Any] = use_absolute_embeddings
_a : Optional[Any] = patch_norm
_a : Dict = layer_norm_eps
_a : Dict = initializer_range
_a : Union[str, Any] = is_training
_a : List[str] = scope
_a : Any = use_labels
_a : Any = type_sequence_label_size
_a : Dict = encoder_stride
_a : Optional[int] = out_features
_a : Any = out_indices
def _lowercase ( self : Optional[Any] ) -> int:
_a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Tuple = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> List[str]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple ) -> List[str]:
_a : int = MaskFormerSwinModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Any = model(UpperCAmelCase__ )
_a : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_a : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[Any]:
_a : Optional[Any] = MaskFormerSwinBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Union[str, Any] = model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase__ ):
_a : Dict = ["""stem"""]
_a : Optional[int] = MaskFormerSwinBackbone(config=UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ) -> Tuple:
_a : Optional[int] = self.prepare_config_and_inputs()
_a , _a , _a : str = config_and_inputs
_a : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase : Optional[Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
UpperCamelCase : str = False
UpperCamelCase : Tuple = False
UpperCamelCase : Optional[int] = False
def _lowercase ( self : Any ) -> int:
_a : Optional[int] = MaskFormerSwinModelTester(self )
_a : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : int ) -> str:
pass
def _lowercase ( self : Dict ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Tuple ) -> Union[str, Any]:
return
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : str ) -> Tuple:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Dict ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : Optional[Any] ) -> Dict:
pass
def _lowercase ( self : int ) -> Union[str, Any]:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def _lowercase ( self : Any ) -> Tuple:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(UpperCAmelCase__ )
_a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Optional[Any] ) -> int:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : Any ) -> List[Any]:
pass
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]:
_a : Optional[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
_a : Dict = outputs.hidden_states
_a : Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# Swin has a different seq_length
_a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : str ) -> Dict:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_a : Optional[Any] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : str = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = 3
_a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_a : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_a : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_a : Optional[int] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Any ) -> Any:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
pass
def _lowercase ( self : Dict ) -> List[str]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCAmelCase__ : List[Any] ):
_a : int = 0
return t
def check_equivalence(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]={} ):
with torch.no_grad():
_a : Union[str, Any] = model(**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Any = model(**UpperCAmelCase__ , return_dict=UpperCAmelCase__ , **UpperCAmelCase__ ).to_tuple()
def recursive_check(UpperCAmelCase__ : int , UpperCAmelCase__ : str ):
if isinstance(UpperCAmelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCAmelCase__ ) , set_nan_tensor_to_zero(UpperCAmelCase__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(UpperCAmelCase__ ).any()} and `inf`: {torch.isinf(UpperCAmelCase__ )}. Dict has"""
f""" `nan`: {torch.isnan(UpperCAmelCase__ ).any()} and `inf`: {torch.isinf(UpperCAmelCase__ )}."""
) , )
recursive_check(UpperCAmelCase__ , UpperCAmelCase__ )
for model_class in self.all_model_classes:
_a : Any = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Dict = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
_a : List[Any] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Union[str, Any] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {"""output_hidden_states""": True} )
_a : int = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
_a : Tuple = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
check_equivalence(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCamelCase ( unittest.TestCase , snake_case_ ):
UpperCamelCase : Tuple = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase : Dict = MaskFormerSwinConfig
def _lowercase ( self : int ) -> int:
_a : Union[str, Any] = MaskFormerSwinModelTester(self )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
_a : str = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
_a : Optional[int] = backbone_class(UpperCAmelCase__ )
backbone.to(UpperCAmelCase__ )
backbone.eval()
_a : Any = backbone(**UpperCAmelCase__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCAmelCase__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_a : Union[str, Any] = backbone(**UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_a , _a , _a : Optional[int] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_a : Any = backbone(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertIsNotNone(outputs.attentions )
| 389 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCamelCase ( a_ : int):
if not is_accelerate_available():
return method
lowerCamelCase :Any = version.parse(accelerate.__version__).base_version
if version.parse(a_) < version.parse('''0.17.0'''):
return method
def wrapper(self : Optional[Any] , *a_ : List[str] , **a_ : str):
if hasattr(self , '''_hf_hook''') and hasattr(self._hf_hook , '''pre_forward'''):
self._hf_hook.pre_forward(self)
return method(self , *a_ , **a_)
return wrapper
| 49 | def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 49 | 1 |
from math import pi
def __lowerCamelCase ( A__ : int , A__ : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 278 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : List[Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCamelCase__ : List[str] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCamelCase__ : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=None , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :Optional[int]="auto" , lowerCamelCase_ :Dict=-1 , lowerCamelCase_ :str=0.9 , lowerCamelCase_ :str=5 , lowerCamelCase_ :Tuple=5_00 , lowerCamelCase_ :str="gpt2-large" , lowerCamelCase_ :List[Any]=-1 , lowerCamelCase_ :Dict=10_24 , lowerCamelCase_ :Tuple=25 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :List[Any]=25 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = compute_mauve(
p_text=lowerCamelCase_ , q_text=lowerCamelCase_ , p_features=lowerCamelCase_ , q_features=lowerCamelCase_ , p_tokens=lowerCamelCase_ , q_tokens=lowerCamelCase_ , num_buckets=lowerCamelCase_ , pca_max_data=lowerCamelCase_ , kmeans_explained_var=lowerCamelCase_ , kmeans_num_redo=lowerCamelCase_ , kmeans_max_iter=lowerCamelCase_ , featurize_model_name=lowerCamelCase_ , device_id=lowerCamelCase_ , max_text_length=lowerCamelCase_ , divergence_curve_discretization_size=lowerCamelCase_ , mauve_scaling_factor=lowerCamelCase_ , verbose=lowerCamelCase_ , seed=lowerCamelCase_ , )
return out
| 698 | 0 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
_lowercase = number_of_bytes // partitions
_lowercase = []
for i in range(lowerCAmelCase__ ):
_lowercase = i * bytes_per_partition + 1
_lowercase = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['GLPNFeatureExtractor']
_lowerCamelCase = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 572 | 0 |
'''simple docstring'''
from math import factorial, radians
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 18 , SCREAMING_SNAKE_CASE : int = 10 ):
UpperCAmelCase = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
UpperCAmelCase = radians(SCREAMING_SNAKE_CASE )
UpperCAmelCase = angle_in_radians
UpperCAmelCase = 3
UpperCAmelCase = -1
for _ in range(SCREAMING_SNAKE_CASE ):
result += (b * (angle_in_radians**a)) / factorial(SCREAMING_SNAKE_CASE )
UpperCAmelCase = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__import__('doctest').testmod()
| 447 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger()
@dataclass
class lowercase_ :
'''simple docstring'''
__lowerCAmelCase : nn.Module
__lowerCAmelCase : List[nn.Module] = field(default_factory=a )
__lowerCAmelCase : list = field(default_factory=a )
def snake_case_ ( self , a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self , a_ ) -> Union[str, Any]:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowercase_ :
'''simple docstring'''
__lowerCAmelCase : nn.Module
__lowerCAmelCase : nn.Module
__lowerCAmelCase : int = 0
__lowerCAmelCase : List = field(default_factory=a )
__lowerCAmelCase : List = field(default_factory=a )
def __call__( self , a_ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = Tracker(self.dest )(a_ ).parametrized
UpperCAmelCase = Tracker(self.src )(a_ ).parametrized
UpperCAmelCase = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
UpperCAmelCase = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : ResNetConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ):
print(f'''Converting {name}...''' )
with torch.no_grad():
UpperCAmelCase = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase = ResNetForImageClassification(SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase = ModuleTransfer(src=SCREAMING_SNAKE_CASE , dest=SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.randn((1, 3, 224, 224) )
module_transfer(SCREAMING_SNAKE_CASE )
assert torch.allclose(from_model(SCREAMING_SNAKE_CASE ) , our_model(SCREAMING_SNAKE_CASE ).logits ), "The model logits don't match the original one."
UpperCAmelCase = f'''resnet{'-'.join(name.split('resnet' ) )}'''
print(SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE , )
# we can use the convnext one
UpperCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE , )
print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or, when ``model_name`` is None, all) supported ResNet checkpoints.

    Returns the last config used and the expected logits shape ``(1, num_labels)``.
    Names are restored to match the call site in ``__main__``; the id2label
    comprehension now converts the key (``int(k)``), not an unbound name.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    # ImageNet-1k label mapping is shared by every checkpoint below.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }
    if model_name:
        # Bind config here too so the final return cannot hit an unbound name.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # The original bound the parser/args/path to throwaway names and then read
    # `parser`, `args` and `pytorch_dump_folder_path`, which were unbound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    # NOTE(review): `type=bool` is an argparse footgun (`--push_to_hub False` is
    # truthy); kept for CLI compatibility — consider a store_true/false action.
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 447 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters shared by the LayoutLMv3 image-processor tests.

    Renamed from a mangled identifier to match its use site
    (``LayoutLMvaImageProcessingTester(self)``); the former mangled locals are
    restored to the instance attributes the tests read.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size mirrors the processor's own default.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Test suite for the LayoutLMv3 image processor.

    NOTE(review): the base class ``__a`` is unbound here — presumably the
    ``ImageProcessingSavingTestMixin`` imported above; confirm upstream. The
    method names were mangled away from unittest's ``test_*`` convention, so
    the runner will not discover them as written.
    """

    # Image-processor class under test (None when pytesseract is unavailable).
    _A : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def lowerCAmelCase ( self : List[str] ) -> Any:
        """setUp-equivalent: build the shared hyper-parameter tester.

        NOTE(review): the result is bound to a mangled local instead of the
        ``self.image_processor_tester`` attribute the property below reads —
        confirm against upstream.
        """
        __lowercase : Union[str, Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Kwargs dict used to instantiate the processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , """do_resize""" ) )
self.assertTrue(hasattr(__a , """size""" ) )
self.assertTrue(hasattr(__a , """apply_ocr""" ) )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Intentionally empty — presumably overriding a mixin test that does
        not apply to this processor; confirm which one against upstream."""
        pass
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__lowercase : List[str] = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__lowercase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowercase : Dict = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__lowercase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowercase : List[Any] = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Dict = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase : List[str] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__lowercase : Optional[int] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__lowercase : Tuple = image_processing(__a , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
"""consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__lowercase : List[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
[611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__lowercase : List[str] = LayoutLMvaImageProcessor(apply_ocr=__a )
__lowercase : Union[str, Any] = image_processing(__a , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 715 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester asserting MobileNetV2-specific attributes exist.

    Restored the class name used at its construction site and the unbound
    ``__a`` base to the ``ConfigTester`` imported above.
    """

    def create_and_test_config_common_properties(self) -> None:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV2 configs and inputs for the model tests.

    Renamed to match its construction site; ``__init__`` parameter names are
    restored so the attribute assignments no longer read unbound names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Full width is kept only for finegrained output; otherwise the head is
        # scaled down by the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
    """Model test suite for MobileNetV2.

    NOTE(review): the two ``__a`` bases are unbound — presumably the
    ``ModelTesterMixin`` and ``PipelineTesterMixin`` imported above; confirm
    upstream. The ``_A`` class attributes were name-mangled; the mixins expect
    names such as ``all_model_classes`` / ``pipeline_model_mapping`` and
    boolean switches (e.g. ``test_pruning``) — verify the mapping upstream.
    """

    # Model classes exercised by the common tests (empty without torch).
    _A : Tuple = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tests.
    _A : Optional[Any] = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature switches disabled for this architecture (mangled names).
    _A : Tuple = False
    _A : List[str] = False
    _A : List[str] = False
    _A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests.

    Renamed to match its call sites; the original returned the unbound name
    ``image`` after binding the opened file to a mangled local.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against pretrained MobileNetV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        """Shared image processor; None when vision deps are unavailable."""
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) ) | 649 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for CPM-Ant.

    Restored: the unbound ``__lowercase`` base to the imported
    ``TokenizerTesterMixin``; the mixin-required ``tokenizer_class`` /
    ``test_rust_tokenizer`` attribute names; and the two methods, which were
    both named ``a`` (the second silently shadowed the first and neither ran
    as ``setUp`` / a ``test_*`` case).
    """

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        """Write a tiny vocab file into the mixin's temp dir."""
        super().setUp()
        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self) -> None:
        """Round-trip tokenize/convert/decode against the published checkpoint."""
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 56 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def A__ ( A_ ) -> List[str]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def is_chinese(word: str) -> int:
    """Return 1 if every character of *word* is a CJK ideograph, else 0.

    Renamed to match its call sites; the loop variable is now actually used
    instead of reading an unbound name.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the distinct multi-character, all-Chinese tokens from *tokens*."""
    word_set = set()
    for token in tokens:
        # Only multi-character fully-Chinese tokens count as "words".
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix WordPiece continuation markers (``##``) onto the non-initial
    characters of any known Chinese word found in *bert_tokens* (in place).

    Greedy longest-match scan: at each Chinese position, try spans from the
    longest known word length down to 2.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer) -> List[List[int]]:
    """For each input line, return the positions of BERT sub-tokens that are
    whole-word continuations of Chinese words (used for whole-word masking).

    Renamed from a mangled identifier to match its call site; the mangled
    locals are restored so intermediate results are actually accumulated.
    """
    # LTP segmentation in batches of 100 lines to bound memory.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    # BERT tokenization of the same lines, batched identically.
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read ``args.file_name``, compute Chinese-word reference ids, and write
    them (one JSON list per line) to ``args.save_path``.

    Restored from the obfuscated name ``A__``; the parameter was renamed to
    ``args`` to match the attribute reads in the body.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # Obfuscation bug fixed: the parser was bound to `__magic_name__` while
    # the following lines read `parser` (and likewise `args`).
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
| 497 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Sliding-window parameters used to split long documents into model inputs.
DOC_STRIDE = 2048
MAX_LENGTH = 4096
# Seed for the sub-sampling of "null" examples in save_to_disk.
SEED = 42
# Set PROCESS_TRAIN=true in the environment to preprocess the training split.
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
# Integer class ids for the five answer categories.
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowercase ( _a ) -> List[Any]:
def choose_first(_a ,_a=False ):
assert isinstance(_a ,_a )
if len(_a ) == 1:
UpperCAmelCase_: Optional[Any] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCAmelCase_: Dict = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
UpperCAmelCase_: Union[str, Any] = {"id": example["id"]}
UpperCAmelCase_: Union[str, Any] = example["annotations"]
UpperCAmelCase_: List[str] = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCAmelCase_: Tuple = ["yes"] if 1 in yes_no_answer else ["no"]
UpperCAmelCase_: Union[str, Any] = []
UpperCAmelCase_: Optional[Any] = []
UpperCAmelCase_: int = ["<cls>"]
else:
UpperCAmelCase_: Dict = ["short"]
UpperCAmelCase_: int = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
UpperCAmelCase_: Dict = ["long"]
UpperCAmelCase_: str = choose_first(annotation["long_answer"] ,is_long_answer=_a )
UpperCAmelCase_: List[str] = []
answer.update(_a )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCAmelCase_: Any = True
else:
UpperCAmelCase_: Optional[int] = False
UpperCAmelCase_: Tuple = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] ,_a ) for k in cols ):
raise ValueError("Issue in ID" ,example["id"] )
return answer
def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and re-index the answer span.

    Returns ``{"context": str, "answer": {...}}`` where the answer tokens are
    relative to the HTML-free context; yes/no answers get the ignore index
    -100 and missing answers are flagged with start/end -1. When ``assertion``
    is True, prints a diff whenever the recomputed span disagrees with the
    original one. Restored from the obfuscated name ``lowercase``.
    """
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # shift the answer left for every HTML token removed before it
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question+context and split it into overlapping windows.

    Each window keeps the question prefix and ``max_length - q_len`` document
    tokens with an overlap of ``doc_stride`` between windows; answer token
    positions are re-indexed per window (ignore index -100 / category "null"
    when the answer is not inside the window). Restored from the obfuscated
    name ``lowercase``.
    """
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    # re-express word-level answer positions as sub-token counts
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # answer lies fully inside this window: shift to window coordinates
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Dataset-map entry point: strided tokenization of one example.

    Thin wrapper around ``get_strided_contexts_and_ans``; restored from the
    obfuscated name ``lowercase`` (referenced as ``prepare_inputs`` in the
    ``__main__`` block).
    """
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append the tokenized samples to ``file_name`` as JSON lines.

    Drops unanswerable windows and randomly discards 60% of "null"-category
    windows to rebalance the data. Restored from the obfuscated name
    ``lowercase`` (called as ``save_to_disk`` in the ``__main__`` block).
    """
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop 60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    # Obfuscation bug fixed: every binding below was named `_lowerCAmelCase`,
    # while later statements read `data`, `tokenizer`, `fn_kwargs`, etc.
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the (slow, word-level) Transformer-XL tokenizer.

    Repairs from the obfuscated source: the base class ``snake_case__`` was
    undefined (the file imports ``TokenizerTesterMixin``); the mixin config
    attributes and all method names had been collapsed to the same
    placeholder, so unittest could not discover the tests.
    """

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Minimal vocabulary written to a temp file for the tests below.
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both bindings below were named `snake_case_`, so the URL map
# shadowed the logger. Restored to conventional names; confirm the map name
# against the rest of the module (not visible here).
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class snake_case__(PretrainedConfig):
    """Configuration for a Donut Swin vision encoder.

    Repairs from the obfuscated source: the base class ``lowerCAmelCase_``
    was undefined (the file imports ``PretrainedConfig``); ``model_type`` and
    ``attribute_map`` — names ``PretrainedConfig`` relies on — had been
    renamed; ``__init__`` declared every parameter with the same name (a
    SyntaxError) and assigned to locals instead of ``self``.
    """

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 595 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> HF WavLM parameter name. A `*` is later
# replaced with the layer index by `recursively_load_weights`.
# (Obfuscation bug fixed: these were all bound to `snake_case_`, while the
# conversion functions read `logger` and `MAPPING`.)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the dotted ``key`` down ``hf_pointer`` and copy ``value`` in.

    ``weight_type`` selects which tensor of the resolved module is written
    ("weight", "weight_g", "weight_v", "bias", or None for a bare tensor).
    Asserts the destination shape matches before copying.
    Restored from the obfuscated name ``lowercase_`` (called as
    ``set_recursively`` in ``recursively_load_weights``).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every fairseq WavLM weight into the HF model, warning on leftovers.

    Conv feature-extractor weights are routed to ``load_conv_layer``; all
    other tensors are matched against ``MAPPING`` and written with
    ``set_recursively``. Restored from the obfuscated name ``lowercase_``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # replace the wildcard with the encoder layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF extractor.

    ``type_id`` 0 is a conv weight/bias; ``type_id`` 2 is a layer norm
    (only layer 0 when group norm is used). Unmatched names are appended to
    ``unused_weights``. Restored from the obfuscated name ``lowercase_``
    (called as ``load_conv_layer`` in ``recursively_load_weights``).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF ``WavLMModel`` directory.

    Loads the original checkpoint, rebuilds the original model, copies its
    weights into a freshly-configured HF model (``config_path`` optional),
    and saves the result. Restored from the obfuscated name ``lowercase_``
    (called as ``convert_wavlm_checkpoint`` in the ``__main__`` block).
    """
    checkpoint = torch.load(checkpoint_path)

    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Obfuscation bug fixed: the parser and parsed args were bound to
    # `snake_case_` while the following lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 595 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class __A(tf.keras.layers.Layer):
    """Adaptive softmax (with clusters) for TF Transformer-XL.

    Repairs from the obfuscated source: all four methods shared the name
    ``__lowercase`` (so only the last survived) and every ``self.<attr>``
    assignment had been replaced with a local placeholder. Restored to the
    conventional ``build`` / ``_logit`` / ``_gather_logprob`` / ``call``
    layout; behavior reconstructed from the visible right-hand sides.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_projs = []
        self.out_layers = []

    def build(self, input_shape):
        # All weights start at zero; they are overwritten when pretrained
        # Transformer-XL weights are loaded.
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        # x: [len, bsz, d]; optional projection to the embedding dim first.
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        # Select logprob[r, target[r]] for every row r.
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # plain softmax over the full vocabulary
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # head cluster: shortlist tokens + one logit per tail cluster
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 154 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# Obfuscation bug fixed: every binding below was named `A_`, while the
# tokenizer class reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust tokenizers-backed) XLNet tokenizer based on SentencePiece.

    Pads on the left and uses XLNet's `A </s> B </s> <cls>` input format.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # XLNet uses segment id 3 for padding tokens.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be recreated when the sentencepiece model file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by appending special tokens: `A </s> <cls>` or `A </s> B </s> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create segment ids: sequence A -> 0, sequence B -> 1, trailing <cls> -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`; returns the written path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 154 | 1 |
import requests

APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return current weather for location *q* as parsed JSON.

    NOTE: `params=locals()` sends the parameter names (`q`, `appid`) as the
    query-string keys, so these names must match the OpenWeatherMap API.
    """
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the 5-day forecast for location *q* as parsed JSON."""
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def current_and_future_weather(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return the one-call (current + future) weather for a lat/lon as parsed JSON."""
    return requests.get(URL_BASE + 'onecall', params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 53 | """simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for *year* using Gauss's Easter algorithm.

    Returns a `datetime` at midnight on Easter Sunday.
    https://en.wikipedia.org/wiki/Date_of_Easter#Gauss's_Easter_algorithm
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    # NOTE(review): the textbook algorithm floors this division; kept as-is
    # (true division) to preserve the original implementation's behavior.
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two historical exceptions where Easter is pinned to a fixed date.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    # Demo: print Easter dates around the present, choosing past/future tense.
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 644 | 0 |
'''simple docstring'''
def find_min(arr):
    """Partition *arr* into two subsets minimizing the difference of their sums.

    Returns the minimum possible ``|sum(part1) - sum(part2)|``. Uses the
    classic subset-sum DP in O(len(arr) * sum(arr)) time and space.
    """
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    # Sum 0 is always achievable via the empty subset — including i == 0,
    # which also makes the empty-input case return 0 instead of crashing.
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip element i-1 ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits.
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best split places a sum as close to s/2 as possible on one side.
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
| 271 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Mapping from checkpoint names to their hosted config files.
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """Configuration class storing the hyperparameters of a MobileNetV2 model.

    Defaults correspond to the `google/mobilenet_v2_1.0_224` architecture.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 224,
        depth_multiplier: float = 1.0,
        depth_divisible_by: int = 8,
        min_depth: int = 8,
        expand_ratio: int = 6,
        output_stride: int = 32,
        first_layer_is_expansion: bool = True,
        finegrained_output: bool = True,
        hidden_act: str = "relu6",
        tf_padding: bool = True,
        classifier_dropout_prob: float = 0.8,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 0.001,
        semantic_loss_ignore_index: int = 255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1E-4
| 271 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    """Unit tests for `is_safetensors_compatible`.

    Each test feeds a listing of repo filenames (optionally with a `variant`)
    and checks whether every `.bin` weight has a safetensors counterpart.
    """

    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # Non-variant filenames should still be accepted when a variant is requested.
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # Non-variant text-encoder files with a requested variant are still compatible.
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 165 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible
    label into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        # Accept a comma-separated string as well as a list of labels.
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(""",""") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""")
        # The template must actually contain a `{}` slot for the label.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        # One (premise, hypothesis) pair per (sequence, label) combination.
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is turned
    into a premise/hypothesis pair and scored by an NLI model; "entailment"
    logits are interpreted as the label's score.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
                """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""")

    @property
    def entailment_id(self):
        # Index of the "entailment" class in the model's label2id mapping, or -1 if absent.
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("""entail"""):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Tokenize premise/hypothesis pairs for the NLI model."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
                """ `pad_token=eos_token`""")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        # Backwards compatibility: `multi_class` was renamed to `multi_label`.
        if kwargs.get("""multi_class""", None) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""])
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        """Classify `sequences` against candidate labels (kwarg or single positional)."""
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''')

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        # Emit one model input per candidate label; `is_last` marks the chunk boundary.
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 165 | 1 |
# Self-reproducing one-liner ("quine"): the lambda %-formats its own source
# template into itself; '%%' collapses to '%' on substitution, so the printed
# text is exactly this statement.
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 107 | import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """Builds tiny VideoMAE configs/inputs and shared checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for VideoMAE (attention/hidden-state shapes, signatures, …)."""

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                # for pretraining, attentions only cover the visible (unmasked) patches
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass
pass
def prepare_video():
    """Download the sample spaghetti-eating video and return it as a list of frames.

    Returns:
        list: the frames of the video, i.e. the first axis of the stored numpy array.
    """
    # Defined under the name the integration tests below actually call
    # (`prepare_video()`); the original defined `A__` and then loaded from the
    # undefined name `snake_case_`, which raised NameError at runtime.
    file_path = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file_path)
    return list(video)
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    # Slow integration tests for VideoMAE against real Hub checkpoints.
    # NOTE(review): the bare name `lowerCAmelCase` is used where the upstream test
    # uses `torch_device`, and results are assigned to `SCREAMING_SNAKE_CASE__`
    # but read through descriptive names (`model`, `outputs`, ...) -- looks like
    # mangled obfuscation; confirm against the upstream VideoMAE test file.

    @cached_property
    def UpperCamelCase_ ( self ) -> List[str]:
        # Image processor used by both tests; None when vision deps are missing.
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # End-to-end video classification: verifies the logits shape
        # (1, 400 Kinetics classes) and the first three logits against reference values.
        SCREAMING_SNAKE_CASE__: Dict= VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= self.default_image_processor
        SCREAMING_SNAKE_CASE__: Any= prepare_video()
        SCREAMING_SNAKE_CASE__: List[Any]= image_processor(lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: Union[str, Any]= model(**lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE__: Any= torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor([0.3669, -0.0688, -0.2421] ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )

    @slow
    def UpperCamelCase_ ( self ) -> List[str]:
        # End-to-end pre-training forward pass: verifies the reconstruction logits
        # shape/values and the loss under `norm_pix_loss` True (checkpoint default)
        # and False.
        SCREAMING_SNAKE_CASE__: int= VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= self.default_image_processor
        SCREAMING_SNAKE_CASE__: Dict= prepare_video()
        SCREAMING_SNAKE_CASE__: int= image_processor(lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
        # add boolean mask, indicating which patches to mask
        SCREAMING_SNAKE_CASE__: List[Any]= hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        SCREAMING_SNAKE_CASE__: List[Any]= torch.load(lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: str= model(**lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE__: Tuple= torch.Size([1, 1408, 1536] )
        SCREAMING_SNAKE_CASE__: Optional[int]= torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowerCAmelCase )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        SCREAMING_SNAKE_CASE__: str= torch.tensor([0.5142] , device=lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        SCREAMING_SNAKE_CASE__: Dict= VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowerCAmelCase ).to(
            lowerCAmelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: List[Any]= model(**lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor(torch.tensor([0.6469] ) , device=lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase , atol=1e-4 ) )
| 107 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A( lowerCamelCase__ , unittest.TestCase ):
    """Tokenizer tests for LED (slow and fast), driven by TokenizerTesterMixin.

    NOTE(review): the three class attributes are all named `A` (each overwrites
    the previous), and values are assigned to `_UpperCamelCase :T = ...` while
    later reads use `SCREAMING_SNAKE_CASE__` or descriptive names -- this looks
    like mangled obfuscation of the upstream LED tokenizer tests; confirm
    against `tests/models/led/test_tokenization_led.py` before relying on it.
    """

    A = LEDTokenizer
    A = LEDTokenizerFast
    A = True

    def _UpperCamelCase( self ) -> List[Any]:
        """Write a tiny BPE vocab and merges file to tmpdir for the toy tokenizer."""
        super().setUp()
        _UpperCamelCase :List[str] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        _UpperCamelCase :Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
        _UpperCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _UpperCamelCase :Optional[Any] = {'''unk_token''': '''<unk>'''}
        _UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        _UpperCamelCase :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )

    def _UpperCamelCase( self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        """Instantiate the slow tokenizer from the files written in setUp."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase( self , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
        """Instantiate the fast (Rust) tokenizer from the files written in setUp."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> Any:
        """Return a (input, expected output) text pair for the mixin's round-trip test."""
        return "lower newer", "lower newer"

    @cached_property
    def _UpperCamelCase( self ) -> Union[str, Any]:
        """Real pretrained slow tokenizer used by the batch/integration tests."""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )

    @cached_property
    def _UpperCamelCase( self ) -> List[str]:
        """Real pretrained fast tokenizer used by the batch/integration tests."""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )

    @require_torch
    def _UpperCamelCase( self ) -> Any:
        """Batch encoding returns a BatchEncoding with the expected ids and shapes."""
        _UpperCamelCase :List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        _UpperCamelCase :Dict = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            _UpperCamelCase :Tuple = batch.input_ids.tolist()[0]
            self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    @require_torch
    def _UpperCamelCase( self ) -> Union[str, Any]:
        """Plain call produces input_ids/attention_mask but no label-side keys."""
        _UpperCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :Tuple = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , SCREAMING_SNAKE_CASE__ )
            self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE__ )
            self.assertNotIn('''labels''' , SCREAMING_SNAKE_CASE__ )
            self.assertNotIn('''decoder_attention_mask''' , SCREAMING_SNAKE_CASE__ )

    @require_torch
    def _UpperCamelCase( self ) -> int:
        """`text_target` with max_length padding yields fixed-width target ids."""
        _UpperCamelCase :int = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :Dict = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def _UpperCamelCase( self ) -> int:
        """Very long inputs are truncated to the model's 16384-token maximum."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :List[str] = tokenizer(
                ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )

    @require_torch
    def _UpperCamelCase( self ) -> Optional[Any]:
        """Both source and target sequences are wrapped in <s> ... </s>."""
        _UpperCamelCase :Any = ['''A long paragraph for summarization.''']
        _UpperCamelCase :Optional[int] = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
            _UpperCamelCase :str = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
            _UpperCamelCase :Tuple = inputs['''input_ids''']
            _UpperCamelCase :List[str] = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def _UpperCamelCase( self ) -> Optional[int]:
        """`pad` preserves a LED-specific `global_attention_mask`, padding it with 0s."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _UpperCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
            _UpperCamelCase :List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            _UpperCamelCase :Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
            _UpperCamelCase :Tuple = [[0] * len(SCREAMING_SNAKE_CASE__ ) for x in encoded_output['''input_ids''']]
            _UpperCamelCase :Optional[int] = tokenizer.pad(SCREAMING_SNAKE_CASE__ )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase( self ) -> Optional[int]:
        # Mixin test intentionally disabled for LED; placeholder body.
        pass

    def _UpperCamelCase( self ) -> Any:
        """Slow and fast tokenizers agree on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                _UpperCamelCase :Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                _UpperCamelCase :List[str] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                _UpperCamelCase :Optional[Any] = '''A, <mask> AllenNLP sentence.'''
                _UpperCamelCase :Any = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
                _UpperCamelCase :int = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                _UpperCamelCase :Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                _UpperCamelCase :Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 355 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()

# Module-level logger used by the conversion helpers below. The original
# assigned this (and MAPPING) to the throwaway name `UpperCamelCase__`, so the
# reads of `logger`/`MAPPING` further down raised NameError.
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their HF SEW counterparts; a "*"
# placeholder is replaced with the encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module slot addressed by the dotted path `key`.

    Walks `key` (e.g. ``encoder.layers.0.attention.k_proj``) attribute by
    attribute starting from `hf_pointer`, verifies the destination shape
    matches `value`, and writes into the requested slot.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path to the target submodule/parameter.
        value: tensor to copy in.
        full_name: original fairseq parameter name (for logging/assert messages).
        weight_type: one of "weight", "weight_g", "weight_v", "bias", or None
            (None writes into the parameter's `.data` directly).
    """
    # Renamed from the obfuscated `A_`: the caller invokes `set_recursively`,
    # and the original's five identical parameter names were a SyntaxError.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape sanity check before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every weight of a fairseq SEW model into the HF `hf_model`.

    Conv feature-extractor weights are routed through `load_conv_layer`; all
    other weights are matched against the module-level `MAPPING` table and
    written with `set_recursively`. Unmatched weights are collected and logged.

    Args:
        fairseq_model: source fairseq model (already loaded).
        hf_model: destination `SEWModel` / `SEWForCTC`.
        is_finetuned: True when converting a CTC-finetuned checkpoint, which
            nests the encoder under the `sew.` prefix.
    """
    # Renamed from the obfuscated `A_`: the __main__ path calls
    # `recursively_load_weights`, which was previously undefined.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Finetuned checkpoints nest everything except the LM head under "sew.".
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor weight into the HF feature extractor.

    fairseq names look like ``...conv_layers.<layer_id>.<type_id>.{weight,bias}``:
    type_id 0 is the conv itself, type_id 2 is the layer norm (present on every
    layer with layer-norm mode, or only on layer 0 with group-norm mode).
    Anything else is recorded in `unused_weights`.

    Args:
        full_name: full fairseq parameter name.
        value: tensor to copy in.
        feature_extractor: HF feature extractor module being filled.
        unused_weights: list collecting names that could not be placed.
        use_group_norm: True when the HF config uses group norm.
    """
    # Renamed from the obfuscated `A_`: `recursively_load_weights` calls
    # `load_conv_layer`, which was previously undefined.
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Original assert message indexed `feature_extractor[layer_id]`, which
            # would itself raise on failure; fixed to the module actually compared.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a `SEWConfig` from a loaded fairseq SEW model's configuration.

    Args:
        model: the fairseq model (finetuned CTC wrapper or bare pretraining model).
        is_finetuned: True when `model` is the CTC wrapper, whose encoder config
            lives under `w2v_encoder.w2v_model`.

    Returns:
        SEWConfig: the equivalent HF configuration.
    """
    # Renamed from the obfuscated `A_` (called as `convert_config` below); the
    # original also assigned every field to a throwaway local, so the returned
    # config was never populated.
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    # NOTE(review): upstream uses eval() to parse the conv layer spec string;
    # safe only because the checkpoint is trusted -- do not run on untrusted files.
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW checkpoint into a saved HF model directory.

    Loads the fairseq checkpoint, derives or loads a `SEWConfig`, builds the
    feature extractor (and, for finetuned CTC models, a tokenizer/processor
    from `dict_path`), copies the weights over, and saves everything to
    `pytorch_dump_folder_path`.
    """
    # Renamed from the obfuscated `A_`: __main__ calls `convert_sew_checkpoint`,
    # and the original's identical parameter names were a SyntaxError.
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the original assigned the parser and parsed args to the
    # throwaway name `UpperCamelCase__`, so `parser`/`args` were undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 355 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image into a normalized NCHW torch tensor in [-1, 1].

    Renamed from the obfuscated `__snake_case`: the pipeline's `__call__`
    invokes `preprocess(...)`. Also fixes `np.floataa`, which is not a NumPy
    attribute (should be `np.float32`).

    Args:
        image: a PIL image (assumed RGB -- TODO confirm; channel axis is moved last-to-second).

    Returns:
        torch.Tensor of shape (1, C, H, W) with values in [-1, 1].
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class _A ( __UpperCAmelCase ):
    """Unconditional image super-resolution pipeline using latent diffusion.

    NOTE(review): values are assigned to `__a` but later read through
    descriptive names (`batch_size`, `latents`, ...), and the base class
    `__UpperCAmelCase` is not defined in this view -- this looks like mangled
    obfuscation of diffusers' `LDMSuperResolutionPipeline`; confirm upstream.
    """

    def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and scheduler components on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE)

    @torch.no_grad()
    def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ):
        """Run the denoising loop on a low-resolution image and decode the result.

        Accepts a PIL image or a tensor; returns an `ImagePipelineOutput`
        (or a plain tuple when `return_dict` is False).
        """
        # Determine the batch size from the input type.
        if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
            __a = 1
        elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
            __a = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}')
        if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
            __a = preprocess(__SCREAMING_SNAKE_CASE)
        __a , __a = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        __a = (batch_size, self.unet.config.in_channels // 2, height, width)
        __a = next(self.unet.parameters()).dtype
        # Sample the initial latents and move the conditioning image to the device.
        __a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE)
        __a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
        __a = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        __a = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        __a = {}
        if accepts_eta:
            __a = eta
        for t in self.progress_bar(__SCREAMING_SNAKE_CASE):
            # concat latents and low resolution image in the channel dimension.
            __a = torch.cat([latents, image] , dim=1)
            __a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
            # predict the noise residual
            __a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample
            # compute the previous noisy sample x_t -> x_t-1
            __a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample
        # decode the image latents with the VQVAE
        __a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample
        # Rescale from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        __a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0)
        __a = image / 2 + 0.5
        __a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            __a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
| 60 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # sentencepiece not installed: no slow tokenizer class to fall back on.
    # The original assigned None to `__snake_case`, leaving `BarthezTokenizer`
    # undefined when sentencepiece was missing.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

# Constants renamed from the obfuscated `__snake_case` to the names the
# tokenizer class below actually reads.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class _A ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) BARThez tokenizer based on a SentencePiece BPE model.

    Special-token scheme follows RoBERTa/BARThez: single sequence ``<s> A </s>``,
    pair ``<s> A </s></s> B </s>``. The base class was the undefined name
    `__UpperCAmelCase` and the hook methods were all named `_lowerCamelCase`
    (so they never overrode the framework hooks); both are restored here.
    """

    # Class attributes read by PreTrainedTokenizerFast; the original assigned
    # them all to `UpperCamelCase__`, so the framework never saw them.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow (sentencepiece) vocab can only be re-exported if we still
        # have the original model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BARThez special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return all-zero token type ids (BARThez does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory`; return its path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 60 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    """Builds a tiny SqueezeBert config plus random inputs and checks the
    output shapes of every task head.

    NOTE(review): the obfuscated original gave all ``__init__`` parameters the
    same name (a SyntaxError) and stored values into a dead local instead of
    ``self``; names below are reconstructed from the assignment order and the
    call sites in the test class (e.g. ``SqueezeBertModelTester(self)``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the dropout kwargs below cross over
        # (attention_probs_dropout_prob <- hidden_dropout_prob) exactly as in
        # the original source; preserved verbatim — confirm against
        # SqueezeBertConfig before "fixing".
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each example across the choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for SqueezeBert.

    NOTE(review): the obfuscated original named every test method
    ``lowercase__`` (so they shadowed each other and unittest never ran them)
    and used undefined base-class names; standard ``test_*`` names and the
    mixins imported at the top of this file are restored.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published squeezebert-mnli checkpoint."""

    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        # Reference logits recorded from the original checkpoint.
        expected_tensor = torch.tensor([[0.6_401, -0.0_349, -0.6_041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
| 261 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Module-level logger for the DPT conversion script below.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a DPTConfig (and expected output shape) for a given checkpoint URL.

    Args:
        checkpoint_url: URL of the original DPT weights; "large" and "ade"
            substrings select the ViT-Large backbone and the ADE20k
            segmentation head respectively.

    Returns:
        Tuple of (config, expected_shape) where expected_shape is the output
        shape asserted by the conversion sanity check.

    NOTE(review): the obfuscated original discarded every assignment and
    referenced an undefined ``idalabel``; the config fields are reconstructed
    so the function actually populates the config it returns.
    """
    config = DPTConfig()

    # Default for the base/depth models; overridden below for large/ade.
    expected_shape = (1, 3_84, 3_84)

    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 1_50
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the classification-head keys that have no counterpart in DPT.

    Mutates ``state_dict`` in place; missing keys are ignored.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Translate one original DPT/timm state-dict key to its HF equivalent.

    The substitutions are order-dependent (e.g. ``scratch.output_conv`` must
    become ``head`` before the generic ``head`` -> ``head.head`` rule runs).

    NOTE(review): the obfuscated original discarded every ``replace`` result
    and returned the input unchanged; each substitution now rebinds ``name``.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused timm ``qkv`` projection into separate q/k/v entries.

    Mutates ``state_dict`` in place: for every encoder layer the single
    ``attn.qkv`` weight/bias (rows ordered query, key, value) is popped and
    re-inserted under the HF ``attention.attention.{query,key,value}`` keys.

    NOTE(review): the obfuscated original discarded the slices; the target
    key names are reconstructed from the standard HF ViT/DPT layout.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO cats test image used for the sanity check.

    Performs a network request; returns a PIL image.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint to the HF format and save/push it.

    Args:
        checkpoint_url: URL of the original DPT weights.
        pytorch_dump_folder_path: Output directory for the converted model.
        push_to_hub: Whether to also push model and processor to the hub.
        model_name: Hub repo name used when pushing.

    NOTE(review): the obfuscated original bound all four parameters to one
    name and called undefined helpers; the call graph is restored to use the
    helper functions defined above in this file.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 4_80 if 'ade' in checkpoint_url else 3_84
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')

    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 261 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds tiny ESM configs/inputs for the TF model tests below.

    NOTE(review): the obfuscated original stored every hyperparameter into a
    dead local instead of ``self`` and gave methods duplicated parameter names
    (a SyntaxError); attribute names are reconstructed from the assignment
    order and the usages in the test methods.
    """

    def __init__(self, parent,):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,)

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF ESM.

    NOTE(review): the obfuscated original named every test method ``__a``
    (so they shadowed each other) and used undefined base-class names;
    standard ``test_*`` names and the imported mixins are restored.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the published esm2_t6_8M_UR50D checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 707 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CPM-Ant (jieba-based pre-tokenization).

    NOTE(review): the obfuscated original used an undefined base-class name
    and discarded local variables; the mixin imported at the top of this file
    and the test-body locals are restored.
    """

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 220 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the class body below calls ``logger.info``/``logger.warning``,
# but both module globals were previously bound to the same obfuscated name.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models.

    The obfuscated version declared every ``__init__`` parameter with the same
    name (a SyntaxError) and subclassed an undefined symbol; the canonical
    parameter names — which the body already reads — and the
    ``PretrainedConfig`` base are restored.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # Forward special token ids so PretrainedConfig serializes them.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        """Serialize, expanding the nested EsmFoldConfig dataclass."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings nested inside EsmConfig.

    The obfuscated version gave every field the same name (so the dataclass
    kept only one field) and named the post-init hook arbitrarily, so it never
    ran; the canonical field names and ``__post_init__`` are restored.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a TrunkConfig instance, a plain dict, or nothing.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Folding-trunk hyperparameters for ESMFold.

    Field names restored (all were previously the same identifier). The two
    divisibility checks originally compared each value to itself
    (``x % x != 0``, always false); they now check divisibility by the
    corresponding head width, which is what the later head-count consistency
    checks assume.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Structure-module (IPA head) hyperparameters for ESMFold.

    Canonical field names restored; all fields previously shared one
    identifier, so the dataclass kept only the last default.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Return all fields as a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Referenced by ``EsmConfig.__init__`` when a folding model is built
    without an explicit ``vocab_list``; the function was previously defined
    under an obfuscated name, making that call a NameError.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 460 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Highest Response Ratio Next (HRRN) scheduling: turn-around times.

    All four parameters previously shared one name (a SyntaxError); the real
    names, which the body already reads, are restored.

    NOTE: ``arrival_time`` is sorted in place and ``process_name``/``burst_time``
    are reordered to match — callers should not rely on the original order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU is idle, jump to the next arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                # Pick the ready process with the highest response ratio.
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Waiting time per process: turn-around time minus burst time.

    Parameter names restored (all four previously shared one name).
    ``process_name`` is unused but kept for signature compatibility with the
    caller below.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run of HRRN scheduling. All variables below were previously bound
    # to the same obfuscated name, so the function calls referenced undefined
    # identifiers; the names the calls use are restored.
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
            F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
        )
    print(F'''average waiting time : {mean(waiting_time):.5f}''')
    print(F'''average turn around time : {mean(turn_around_time):.5f}''')
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate an RSA key pair and write it to ``rsa_*.txt`` files.

    Previously every function in this module was named ``A__``, so only the
    last definition survived; restoring distinct names fixes the call chain.
    """
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA key pair of ``key_size`` bits.

    Returns ``(public_key, private_key)`` where each key is ``(n, exponent)``.
    Locals restored: the obfuscated version assigned every value to the same
    name while reading ``p``, ``q``, ``e`` etc.
    """
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # Keep drawing candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist.

    Parameter names restored — the obfuscated signature declared the same
    name twice (a SyntaxError).
    """
    if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
        print("\nWARNING:")
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(F"""{name}_pubkey.txt""", "w") as out_file:
        out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""")
    print(F"""Writing private key to file {name}_privkey.txt...""")
    with open(F"""{name}_privkey.txt""", "w") as out_file:
        out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""")


if __name__ == "__main__":
    main()
| 712 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# All four module globals were previously bound to the same obfuscated name,
# so the class below referenced undefined constants; canonical names restored.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
        '''roberta-base-openai-detector''': (
            '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
        ),
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''roberta-base''': 512,
    '''roberta-large''': 512,
    '''roberta-large-mnli''': 512,
    '''distilroberta-base''': 512,
    '''roberta-base-openai-detector''': 512,
    '''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    Restores the undefined base class, the duplicate class attributes (all
    ``A__``), the duplicate ``__init__`` parameter names (a SyntaxError), and
    the method names: every method was named ``__A``, which shadowed the
    earlier ones and broke the ``@mask_token.setter`` decorator (NameError).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> str:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space disagrees
        # with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> List[Any]:
        # lstrip=True so "<mask>" also matches when preceded by a space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> Tuple:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 579 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerPicklingTest(unittest.TestCase):
    """Checks that an optimizer wrapped by Accelerator.prepare stays picklable."""

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # The obfuscated version passed an undefined name here; prepare the
        # optimizer that the pickling round-trip below exercises.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
# The three docstring constants below were all bound to the same obfuscated
# name, so the metric-class decorator referenced undefined identifiers;
# canonical names restored (string contents unchanged).
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=[\"About 95 species are currently accepted .\"]
    >>> predictions=[\"About 95 you now get in .\"]
    >>> references=[[\"About 95 species are currently known .\"]]
    >>> wiki_split = datasets.load_metric(\"wiki_split\")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(text: str):
    """Lower text and remove punctuation, articles and extra whitespace.

    Bug fixed: the obfuscated version compiled the article regex, discarded
    it, and passed the *text* to ``re.sub`` as the pattern, destroying the
    input; the compiled pattern is now actually used.
    """

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    """Return 1 if gold and prediction match after normalization, else 0.

    Parameter names restored — the obfuscated signature declared the same
    name twice (a SyntaxError).
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage of predictions that exactly match any of their references."""
    # A prediction scores 1 if it matches at least one reference.
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """SARI keep/delete/add F-scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (simplified) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.

    Returns:
        (keepscore, delscore_precision, addscore).

    Locals restored: the obfuscated version wrote every intermediate into one
    throwaway name while reading the real names, so nothing was defined.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: average keep/delete/add over 1- to 4-grams.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate (simplified) sentence.
        rsents: list of reference sentences.

    Locals and parameter names restored from the broken obfuscation (all
    n-gram lists previously shared one identifier).
    """
    numref = len(rsents)

    # Unigram tokens for source and candidate.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize/normalize a sentence for SARI scoring.

    Supports sacrebleu's "13a"/"intl" tokenizers and sacremoses' "moses" and
    "penn" tokenizers; anything else returns the sentence unchanged.
    Parameter names restored (the obfuscated signature repeated one name).
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus SARI score (0-100), averaged over sentence-level SARI."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; every prediction needs the same ref count."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """WikiSplit metric: SARI + sacrebleu + exact-match.

    Method names restored to the ``_info``/``_compute`` hooks that
    ``datasets.Metric`` actually dispatches to (both were previously named
    ``_lowercase``, so neither hook existed).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 563 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
"""simple docstring"""
# Pre-computed table: entry i holds the sum of the squares of the decimal
# digits of i, for every i below 100_000.  Consuming five digits per lookup
# is what makes the chain computation fast.
_lowercase : list[int] = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_00_00)]
# The function below reads the table through this name; keep the original
# (mangled) binding `_lowercase` above as a backward-compatible alias.
DIGITS_SQUARED = _lowercase
def lowercase__ ( snake_case_ :int ) -> int:
    """Return the sum of the squares of the decimal digits of *snake_case_*.

    E.g. 86 -> 8**2 + 6**2 == 100.  Expects a non-negative integer; digits
    are consumed five at a time via the DIGITS_SQUARED lookup table instead
    of one by one.
    """
    # Fix: the original loop read `number` and `sum_of_digits_squared`,
    # but an automated rename had turned the parameter into `snake_case_`
    # and the accumulator into `__UpperCAmelCase`, so both reads raised
    # NameError.  Bind consistent local names once, up front.
    number = snake_case_
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# Memoization table: entry i caches the chain outcome for the number i + 1
# (presumably True = chain reaches 89, False = chain reaches 1 — TODO
# confirm against the original source); None means "not yet computed".
_lowercase : list[bool | None] = [None] * 10_00_00_00
# NOTE(review): the two assignments below rebind the same name and so
# discard the table created above — they look like seed entries mangled by
# an automated rename (presumably CHAINS[0] = True and CHAINS[57] = False,
# matching the "58 and 1" comment).  The chain function below also reads a
# module-level `CHAINS` that is never defined here; reconstruct before use.
_lowercase : bool = True
_lowercase : bool = False
def lowercase__ ( snake_case_ :int ):
    """Memoized chain walk for Project Euler 92: decide whether the
    square-digit chain starting at a number ends in 89 or in 1, caching
    the answer in the module-level table.

    NOTE(review): this body is internally inconsistent after an automated
    rename — it reads `number`, `CHAINS`, `chain`, `next_number` and
    `number_chain`, none of which are bound in this function or at module
    level (the sibling definitions were all renamed to `lowercase__` /
    `_lowercase`).  As written it raises NameError on first call;
    reconstruct from the original source before use.
    """
    # Return the cached outcome if the chain for this number was already
    # walked.  (`number` is presumably the parameter `snake_case_` — TODO
    # confirm.)
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    # Recurse on the next chain member; `chain` / `next_number` are the
    # pre-rename names of this function and the digit-square-sum helper.
    __UpperCAmelCase = chain(next_number(snake_case_ ) )
    __UpperCAmelCase = number_chain
    # Numbers differing only by trailing zeros have the same digit squares,
    # hence the same chain outcome, so the cached value also applies to
    # number*10, number*100, ... up to the table limit.
    while number < 10_000_000:
        __UpperCAmelCase = number_chain
        number *= 10
    return number_chain
def lowercase__ ( snake_case_ :int = 10_000_000 ) -> int:
    """Project Euler 92: count the starting numbers below *snake_case_*
    whose square-digit chain arrives at 89.

    Forces every entry of the module-level memo table ``CHAINS`` to be
    computed via the chain walker (pre-rename name ``chain`` — both names
    must be provided by this module), then counts the True entries.
    """
    for i in range(1 , snake_case_ ):
        if CHAINS[i] is None:
            chain(i + 1 )
    # Fix: count the entries flagged True (chain arrived at 89), slicing by
    # the actual parameter.  The previous code sliced by an undefined name
    # `number` and counted occurrences of the limit value itself inside a
    # boolean table — always 0 for any limit > 1.
    return CHAINS[:snake_case_].count(True )
# Script entry point: run the module's doctests, then print the answer for
# the default limit.
# NOTE(review): `solution` is not defined in this module — the function was
# renamed to `lowercase__` by an automated pass — so this print raises
# NameError as written; confirm the intended name against the original.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{solution() = }""")
| 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Fast (CPU-sized) pipeline tests for ``StableUnCLIPPipeline``.

    NOTE(review): this block is heavily damaged by an automated rename —
    every class attribute is bound to the same placeholder ``a__`` (each
    assignment shadows the previous), all four methods share the name ``a``
    (only the last survives), and every local is bound to
    ``__UpperCAmelCase``.  Names read below (``embedder_hidden_size``,
    ``embedder_projection_dim``, ``prior_tokenizer``, ``components``,
    ``generator``, ``inputs``, ...) are therefore undefined as written.
    Restore names from the upstream diffusers test file before running.
    """

    a__ : Any = StableUnCLIPPipeline
    a__ : Dict = TEXT_TO_IMAGE_PARAMS
    a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
    a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    a__ : Optional[int] = False

    def a ( self : List[str] ):
        """Build the dummy prior + denoising component dict for the pipeline."""
        __UpperCAmelCase = 32
        __UpperCAmelCase = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        __UpperCAmelCase = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        __UpperCAmelCase = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
        torch.manual_seed(0 )
        __UpperCAmelCase = DDPMScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
        # regular denoising components
        torch.manual_seed(0 )
        __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
        __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        __UpperCAmelCase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        __UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
        torch.manual_seed(0 )
        __UpperCAmelCase = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
        torch.manual_seed(0 )
        __UpperCAmelCase = AutoencoderKL()
        __UpperCAmelCase = {
            # prior components
            '''prior_tokenizer''': prior_tokenizer,
            '''prior_text_encoder''': prior_text_encoder,
            '''prior''': prior,
            '''prior_scheduler''': prior_scheduler,
            # image noising components
            '''image_normalizer''': image_normalizer,
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
        }
        return components

    def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
        """Return deterministic pipeline call kwargs for a device/seed pair.

        NOTE(review): both parameters share the placeholder name ``_lowercase``
        (a SyntaxError as written); upstream they are ``device`` and ``seed``.
        """
        if str(_lowercase ).startswith('''mps''' ):
            # mps does not support device-bound generators; seed globally.
            __UpperCAmelCase = torch.manual_seed(_lowercase )
        else:
            __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __UpperCAmelCase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''prior_num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def a ( self : Any ):
        """Attention-slicing parity check (exact comparison only on CPU)."""
        __UpperCAmelCase = torch_device == '''cpu'''
        self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )

    def a ( self : int ):
        """Single-sample vs batched-inference parity check (exact on cpu/mps)."""
        __UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """GPU integration tests for the pretrained ``fusing/stable-unclip-2-1-l``
    checkpoint (slow, requires CUDA and network access).

    NOTE(review): locals are bound to the placeholder ``__UpperCAmelCase`` and
    all three methods share the name ``a`` (each definition shadows the
    previous), so several names used below (``pipe``, ``image``,
    ``mem_bytes``) are undefined as written — restore from the upstream
    diffusers test file before running.
    """

    def a ( self : Any ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a ( self : Any ):
        """Compare a generated image against a stored reference array."""
        __UpperCAmelCase = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
        pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
        __UpperCAmelCase = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        # NOTE(review): presumably compares the generated image to the
        # reference loaded above — both arguments are placeholders here.
        assert_mean_pixel_difference(_lowercase , _lowercase )

    def a ( self : Any ):
        """Check peak CUDA memory with offloading stays under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
        __UpperCAmelCase = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __UpperCAmelCase = pipe(
            '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
        __UpperCAmelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 49 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import table: submodule name -> list of public symbols it provides.
# NOTE(review): the original bound every partial structure to the same
# placeholder name `lowerCAmelCase`, so the `_import_structure` consumed by
# `_LazyModule` at the bottom was never defined (NameError on import).
# Rebuilt here in the canonical transformers layout.
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

# Vision processors are only exported when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the concrete imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 708 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case_):
    """CLIP-vision based image encoder head (Paint-by-Example style).

    NOTE(review): the base class name ``snake_case_`` is undefined at module
    level and every local is bound to the placeholder ``UpperCamelCase`` — an
    automated rename artifact.  The attributes read in the forward method
    (``self.model``, ``self.mapper``, ``self.final_layer_norm``,
    ``self.proj_out``, ``self.uncond_vector``, ``self.proj_size``) are never
    actually assigned here; restore from the upstream diffusers file.
    """

    def __init__( self , A_ , A_=768 )-> Any:
        '''Build the CLIP vision tower, the mapper, and the projection head.

        NOTE(review): both parameters are named ``A_`` (a SyntaxError as
        written); upstream they are ``config`` and ``proj_size=768``.
        '''
        super().__init__(A_ )
        UpperCamelCase = proj_size
        UpperCamelCase = CLIPVisionModel(A_ )
        UpperCamelCase = PaintByExampleMapper(A_ )
        UpperCamelCase = nn.LayerNorm(config.hidden_size )
        UpperCamelCase = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        UpperCamelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def UpperCAmelCase_ ( self , A_ , A_=False )-> Dict:
        '''Encode pixel values into projected conditioning latents.

        NOTE(review): parameters duplicated as ``A_`` — upstream they are
        ``pixel_values`` and ``return_uncond_vector=False``.
        '''
        UpperCamelCase = self.model(pixel_values=A_ )
        UpperCamelCase = clip_output.pooler_output
        UpperCamelCase = self.mapper(latent_states[:, None] )
        UpperCamelCase = self.final_layer_norm(A_ )
        UpperCamelCase = self.proj_out(A_ )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class SCREAMING_SNAKE_CASE__ ( nn.Module):
    """Stack of ``BasicTransformerBlock``s mapping CLIP latents to UNet space.

    NOTE(review): locals are bound to the placeholder ``UpperCamelCase`` and
    the ``BasicTransformerBlock(A_ , A_ , A_ , ...)`` argument list was
    collapsed by an automated rename, so the hidden size / head counts and
    ``self.blocks`` / ``hidden_states`` data flow are lost here — restore
    from the upstream diffusers file.
    """

    def __init__( self , A_ )-> Tuple:
        '''Create (num_hidden_layers + 1) // 5 transformer blocks.'''
        super().__init__()
        UpperCamelCase = (config.num_hidden_layers + 1) // 5
        UpperCamelCase = config.hidden_size
        UpperCamelCase = 1
        UpperCamelCase = nn.ModuleList(
            [
                BasicTransformerBlock(A_ , A_ , A_ , activation_fn='gelu' , attention_bias=A_ )
                for _ in range(A_ )
            ] )

    def UpperCAmelCase_ ( self , A_ )-> Dict:
        '''Apply each transformer block in sequence to the hidden states.'''
        for block in self.blocks:
            UpperCamelCase = block(A_ )
        return hidden_states
| 432 | 0 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __snake_case (base_url , params ):
    """Return the "Cited by ..." text for a Google Scholar lookup.

    Parameters
    ----------
    base_url : str
        Scholar lookup endpoint, e.g. ``https://scholar.google.com/scholar_lookup``.
    params : dict
        Query parameters identifying the publication (the call site below
        passes this by keyword as ``params=``).

    NOTE(review): the original definition repeated one placeholder parameter
    name twice (a SyntaxError) and referenced an undefined local ``div``;
    the locals below restore the obvious data flow.  Also note the file-level
    ``from bsa import BeautifulSoup`` is a mangled ``bs4`` import.
    """
    lowerCamelCase_ : Any = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    # First result entry on the page.
    div = lowerCamelCase_.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    # The footer links; index 2 is the "Cited by N" anchor.
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example lookup parameters for a specific publication.
    # NOTE(review): the dict was renamed to the placeholder `__lowerCamelCase`,
    # so the `params=params` reference below is undefined; `get_citation` is
    # also undefined (the function above is named `__snake_case`).  Both are
    # automated-rename artifacts — restore the original names before running.
    __lowerCamelCase : List[Any] = {
        """title""": (
            """Precisely geometry controlled microsupercapacitors for ultrahigh areal """
            """capacitance, volumetric capacitance, and energy density"""
        ),
        """journal""": """Chem. Mater.""",
        """volume""": 30,
        """pages""": """3979-3990""",
        """year""": 2018,
        """hl""": """en""",
    }
    print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 501 |
'''simple docstring'''
from torch import nn
def _A ( UpperCAmelCase ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 531 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Registry of downloadable pretrained dance-diffusion checkpoints:
# model name -> checkpoint URL plus the audio sample rate / sample size the
# model was trained with.
# NOTE(review): bound to the placeholder name `__snake_case`; the converter
# functions below read `MODELS_MAP`, which is therefore undefined here, and
# later tables in this file rebind `__snake_case` again — rename artifacts.
__snake_case = {
    """gwf-440k""": {
        """url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
        """sample_rate""": 48000,
        """sample_size""": 65536,
    },
    """jmann-small-190k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
        """sample_rate""": 48000,
        """sample_size""": 65536,
    },
    """jmann-large-580k""": {
        """url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
        """sample_rate""": 48000,
        """sample_size""": 131072,
    },
    """maestro-uncond-150k""": {
        """url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
        """sample_rate""": 16000,
        """sample_size""": 65536,
    },
    """unlocked-uncond-250k""": {
        """url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
        """sample_rate""": 16000,
        """sample_size""": 65536,
    },
    """honk-140k""": {
        """url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
        """sample_rate""": 16000,
        """sample_size""": 65536,
    },
}
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : List[str] ) -> Optional[int]:
"""simple docstring"""
return torch.atana(lowercase , lowercase ) / math.pi * 2
def __lowerCAmelCase ( lowercase : Tuple ) -> Dict:
    """Map a linear time schedule onto the "crash" noise schedule.

    NOTE(review): the body reads the undefined names ``t``, ``sigma`` and
    ``alpha_sigma_to_t`` (the helper above was renamed to
    ``__lowerCAmelCase`` by the same automated pass, and this def shadows
    it).  As written this raises NameError; restore from the upstream
    dance-diffusion conversion script.
    """
    snake_case : Any = torch.sin(t * math.pi / 2 ) ** 2
    snake_case : Dict = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(lowercase , lowercase )
class _lowerCAmelCase ( snake_case_ ):
    """Empty attribute container used below to fake an argparse-style config.

    NOTE(review): the base-class name ``snake_case_`` is undefined at module
    level (a rename artifact — presumably plain ``object`` upstream).
    """

    pass
class _lowerCAmelCase ( nn.Module ):
    """Wrapper holding the original unconditional diffusion model + EMA copy.

    NOTE(review): the three constructor locals are all bound to the
    placeholder ``snake_case`` instead of ``self.diffusion`` /
    ``self.diffusion_ema`` / ``self.rng``, so ``deepcopy(self.diffusion)``
    reads an attribute that was never set — restore from upstream.
    """

    def __init__( self , UpperCamelCase__ ) -> List[Any]:
        '''Build the attention U-Net and its EMA shadow from a global config.'''
        super().__init__()
        snake_case : Optional[int] = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 )
        snake_case : Optional[int] = deepcopy(self.diffusion )
        snake_case : Dict = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ )
def __lowerCAmelCase ( lowercase ):
    """Download the named checkpoint with ``wget`` and return its local path.

    ``lowercase`` must be a key of the ``MODELS_MAP`` registry defined above.

    NOTE(review): the original body read the undefined names ``model_name``
    and ``url`` instead of the parameter; fixed to use the parameter and a
    real local.  Also note the URL is interpolated into an ``os.system``
    shell command — acceptable for the trusted hard-coded registry, but do
    not pass untrusted names here.
    """
    url = MODELS_MAP[lowercase]["url"]
    os.system(F'wget {url} ./' )
    return F'./{lowercase}.ckpt'
# Checkpoint layer-number -> diffusers sub-layer name tables used by the
# `rename` helper below (down blocks, up blocks, full mid block, depth-0
# block), plus token maps for ResConvBlock and attention parameter names.
# NOTE(review): all six tables are bound to the same placeholder name
# `__snake_case`, so each assignment shadows the previous one and the names
# the helpers actually reference (DOWN_NUM_TO_LAYER, UP_NUM_TO_LAYER,
# MID_NUM_TO_LAYER, DEPTH_0_TO_LAYER, RES_CONV_MAP, ATTN_MAP) are undefined
# — automated-rename artifacts.
__snake_case = {
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
}
# Up-path layer numbers (8-13) at the current depth.
__snake_case = {
    """8""": """resnets.0""",
    """9""": """attentions.0""",
    """10""": """resnets.1""",
    """11""": """attentions.1""",
    """12""": """resnets.2""",
    """13""": """attentions.2""",
}
# Mid block: both the down-path (1-6) and up-path (8-13) numbering.
__snake_case = {
    """1""": """resnets.0""",
    """2""": """attentions.0""",
    """3""": """resnets.1""",
    """4""": """attentions.1""",
    """5""": """resnets.2""",
    """6""": """attentions.2""",
    """8""": """resnets.3""",
    """9""": """attentions.3""",
    """10""": """resnets.4""",
    """11""": """attentions.4""",
    """12""": """resnets.5""",
    """13""": """attentions.5""",
}
# Depth-0 blocks have no attention layers.
__snake_case = {
    """0""": """resnets.0""",
    """1""": """resnets.1""",
    """2""": """resnets.2""",
    """4""": """resnets.0""",
    """5""": """resnets.1""",
    """6""": """resnets.2""",
}
# ResConvBlock parameter-name translation.
__snake_case = {
    """skip""": """conv_skip""",
    """main.0""": """conv_1""",
    """main.1""": """group_norm_1""",
    """main.3""": """conv_2""",
    """main.4""": """group_norm_2""",
}
# Attention parameter-name translation; list values mean the fused qkv
# projection is split into several diffusers parameters.
__snake_case = {
    """norm""": """group_norm""",
    """qkv_proj""": ["""query""", """key""", """value"""],
    """out_proj""": ["""proj_attn"""],
}
def __lowerCAmelCase ( lowercase ):
    """Rename one ResConvBlock parameter to the diffusers naming scheme.

    ``lowercase`` is either ``skip...`` or ``main.<digit>...``; the prefix is
    translated through ``RES_CONV_MAP``.  Raises ``ValueError`` for any other
    shape of name.

    NOTE(review): the original body read the undefined local ``name``
    instead of the parameter; fixed to use the parameter throughout.
    """
    if lowercase.startswith("skip" ):
        return lowercase.replace("skip" , RES_CONV_MAP["skip"] )
    # name has to be of format main.{digit}
    if not lowercase.startswith("main." ):
        raise ValueError(F'ResConvBlock error with {lowercase}' )
    return lowercase.replace(lowercase[:6] , RES_CONV_MAP[lowercase[:6]] )
def __lowerCAmelCase ( lowercase ):
    """Rename one attention parameter to the diffusers naming scheme.

    Translates the first matching ``ATTN_MAP`` prefix.  A string value yields
    a single renamed key; a list value (the fused qkv projection) yields one
    renamed key per target.  Raises ``ValueError`` when no prefix matches.

    NOTE(review): the original body read the undefined local ``name`` and
    tested the meaningless ``isinstance(lowercase, lowercase)``; fixed to use
    the parameter and to branch on whether the mapped value is a list.
    """
    for key, value in ATTN_MAP.items():
        if lowercase.startswith(key ) and not isinstance(value , list ):
            return lowercase.replace(key , value )
        elif lowercase.startswith(key ):
            return [lowercase.replace(key , v ) for v in value]
    raise ValueError(F'Attn error with {lowercase}' )
def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Optional[int]=13 ) -> Any:
    """Translate one original checkpoint key into the diffusers naming scheme.

    NOTE(review): both parameters share the placeholder name ``lowercase``
    (a SyntaxError as written — upstream they are ``input_string`` and
    ``max_depth=13``), and the locals were collapsed to ``snake_case`` while
    the body still reads ``input_string`` / ``string`` / ``depth`` /
    ``layer_num`` / ``string_left`` / ``prefix`` / ``new_layer`` /
    ``max_depth``, none of which are bound here.  The structure below is
    kept verbatim for reference; restore names from the upstream script.
    """
    snake_case : List[Any] = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )
    # Walk the nested `net.3.` / `main.7.` wrappers, counting U-Net depth.
    snake_case : int = 0
    if string.startswith("net.3." ):
        depth += 1
        snake_case : Dict = string[6:]
    elif string.startswith("net." ):
        snake_case : List[Any] = string[4:]
    while string.startswith("main.7." ):
        depth += 1
        snake_case : Optional[int] = string[7:]
    if string.startswith("main." ):
        snake_case : Tuple = string[5:]
    # mid block
    if string[:2].isdigit():
        snake_case : Dict = string[:2]
        snake_case : Dict = string[2:]
    else:
        snake_case : List[str] = string[0]
        snake_case : List[Any] = string[1:]
    # Pick the layer table and diffusers block prefix from depth/layer_num.
    if depth == max_depth:
        snake_case : Any = MID_NUM_TO_LAYER[layer_num]
        snake_case : List[Any] = "mid_block"
    elif depth > 0 and int(lowercase ) < 7:
        snake_case : Union[str, Any] = DOWN_NUM_TO_LAYER[layer_num]
        snake_case : Optional[Any] = F'down_blocks.{depth}'
    elif depth > 0 and int(lowercase ) > 7:
        snake_case : Dict = UP_NUM_TO_LAYER[layer_num]
        snake_case : List[Any] = F'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        snake_case : Union[str, Any] = DEPTH_0_TO_LAYER[layer_num]
        snake_case : int = F'up_blocks.{max_depth - 1}' if int(lowercase ) > 3 else "down_blocks.0"
    if not string_left.startswith("." ):
        raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
    snake_case : Optional[int] = string_left[1:]
    # Delegate the tail to the resconv / attention renamers.
    if "resnets" in new_layer:
        snake_case : Optional[int] = convert_resconv_naming(lowercase )
    elif "attentions" in new_layer:
        snake_case : str = convert_attn_naming(lowercase )
    snake_case : Optional[int] = new_string_left
    if not isinstance(lowercase , lowercase ):
        snake_case : Optional[Any] = prefix + "." + new_layer + "." + string_left
    else:
        snake_case : Optional[int] = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def __lowerCAmelCase ( lowercase : Dict ) -> Tuple:
    """Rename all keys of the original state dict into the diffusers scheme.

    NOTE(review): the body reads the undefined names ``state_dict``, ``v``
    and ``new_state_dict`` and rebinds the placeholder ``snake_case`` — an
    automated rename broke the accumulation into the result dict.  Restore
    from the upstream conversion script before use.
    """
    snake_case : Optional[int] = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        snake_case : List[Any] = rename(lowercase )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(lowercase , lowercase ):
            snake_case : Tuple = transform_conv_attns(lowercase , lowercase , lowercase )
        else:
            snake_case : Optional[int] = v
    return new_state_dict
def __lowerCAmelCase ( lowercase : int , lowercase : Tuple , lowercase : Union[str, Any] ) -> List[Any]:
    """Split/reshape fused conv attention weights into linear-layer params.

    NOTE(review): three parameters share the placeholder name ``lowercase``
    (a SyntaxError as written — upstream ``new_state_dict, new_k, v``), and
    the destination-dict assignments were collapsed to ``snake_case``.  Kept
    verbatim for reference; restore names from the upstream script.
    """
    if len(lowercase ) == 1:
        if len(v.shape ) == 3:
            # weight
            snake_case : Tuple = v[:, :, 0]
        else:
            # bias
            snake_case : Union[str, Any] = v
    else:
        # qkv matrices
        snake_case : Union[str, Any] = v.shape[0]
        snake_case : Optional[int] = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                snake_case : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                snake_case : int = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def __lowerCAmelCase ( lowercase : List[str] ) -> List[str]:
    """End-to-end conversion: load the original checkpoint, rename its weights
    into a diffusers ``UNet1DModel``-style pipeline, verify the two models
    produce matching samples, and optionally save the converted pipeline.

    NOTE(review): every local is bound to the placeholder ``snake_case`` while
    the body reads the intended names (``model_name``, ``config``,
    ``diffusers_model``, ``orig_model``, ``renamed_state_dict``, ``pipe``,
    ``generated``, ``audio``, ``diff_sum``, ``diff_max``, ...) — an automated
    rename destroyed the data flow, so this raises NameError as written.
    Restore from the upstream dance-diffusion conversion script.
    """
    snake_case : Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    snake_case : str = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        snake_case : List[str] = download(lowercase )
    snake_case : Dict = MODELS_MAP[model_name]["sample_rate"]
    snake_case : Any = MODELS_MAP[model_name]["sample_size"]
    # Fake argparse-style config object for the original model class.
    snake_case : int = Object()
    snake_case : Optional[Any] = sample_size
    snake_case : Optional[Any] = sample_rate
    snake_case : Tuple = 0
    snake_case : List[str] = UNetaDModel(sample_size=lowercase , sample_rate=lowercase )
    snake_case : Tuple = diffusers_model.state_dict()
    snake_case : Optional[Any] = DiffusionUncond(lowercase )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=lowercase )["state_dict"] )
    snake_case : str = orig_model.diffusion_ema.eval()
    snake_case : Dict = orig_model.state_dict()
    snake_case : List[str] = rename_orig_weights(lowercase )
    # Sanity-check the renamed key set against the diffusers model.
    snake_case : Tuple = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    snake_case : List[Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(lowercase ) == 0, F'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("kernel" ) for k in list(lowercase ) ), F'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            snake_case : List[Any] = value.squeeze()
        snake_case : List[str] = value
    diffusers_model.load_state_dict(lowercase )
    # Generate with both models from the same seed and compare outputs.
    snake_case : Any = 100
    snake_case : List[Any] = 33
    snake_case : str = IPNDMScheduler(num_train_timesteps=lowercase )
    snake_case : Optional[Any] = torch.manual_seed(lowercase )
    snake_case : int = torch.randn([1, 2, config.sample_size] , generator=lowercase ).to(lowercase )
    snake_case : Dict = torch.linspace(1 , 0 , steps + 1 , device=lowercase )[:-1]
    snake_case : str = get_crash_schedule(lowercase )
    snake_case : Dict = DanceDiffusionPipeline(unet=lowercase , scheduler=lowercase )
    snake_case : List[Any] = torch.manual_seed(33 )
    snake_case : Dict = pipe(num_inference_steps=lowercase , generator=lowercase ).audios
    snake_case : Union[str, Any] = sampling.iplms_sample(lowercase , lowercase , lowercase , {} )
    snake_case : List[str] = generated.clamp(-1 , 1 )
    snake_case : Optional[Any] = (generated - audio).abs().sum()
    snake_case : List[Any] = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , lowercase )
    print("Diff max" , lowercase )
    assert diff_max < 1e-3, F'Diff max: {diff_max} is too much :-/'
    print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): the parser is bound to the placeholder `__snake_case`
    # (twice), so the `parser.add_argument` calls, `args`, and `main` below
    # are all undefined as written — rename artifacts; upstream this is
    # `parser = argparse.ArgumentParser()` / `args = parser.parse_args()`.
    __snake_case = argparse.ArgumentParser()
    parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
    parser.add_argument(
        """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
    )
    parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    __snake_case = parser.parse_args()
    main(args)
| 117 |
"""simple docstring"""
import baseaa
def __lowerCAmelCase ( lowercase : str ) -> bytes:
"""simple docstring"""
return baseaa.aaaencode(string.encode("utf-8" ) )
def __lowerCAmelCase ( lowercase : bytes ) -> str:
"""simple docstring"""
return baseaa.aaadecode(lowercase ).decode("utf-8" )
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 117 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# Module logger for the WavLM conversion script.
# NOTE(review): the three bindings below all use the placeholder name
# `__magic_name__`, so each shadows the previous one; the functions below
# read `logger`, `MAPPING` and `TOP_LEVEL_KEYS`, which are therefore
# undefined here — automated-rename artifacts.
__magic_name__: int = logging.get_logger(__name__)
# fairseq parameter-name prefix -> HF WavLM parameter-name prefix.
# `*` is replaced by the encoder layer index during conversion.
__magic_name__: Union[str, Any] = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Target keys that live at the top level of the HF model (no prefix).
__magic_name__: Tuple = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def UpperCamelCase ( _A, _A, _A, _A, _A ):
    """Assign ``value`` into the HF module tree at dotted path ``key``.

    NOTE(review): all five parameters are named ``_A`` (a SyntaxError as
    written) while the body reads ``key`` / ``value`` / ``full_name`` /
    ``weight_type`` / ``hf_pointer`` — restore the upstream parameter order
    (``hf_pointer, key, value, full_name, weight_type`` in the reference
    wav2vec2-family converters; confirm) before use.
    """
    # Walk the attribute path down to the target submodule/parameter.
    for attribute in key.split(""".""" ):
        __magic_name__ : str = getattr(_A, _A )
    if weight_type is not None:
        __magic_name__ : Optional[Any] = getattr(_A, _A ).shape
    else:
        __magic_name__ : int = hf_pointer.shape
    # Guard against shape mismatches before copying the tensor.
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        __magic_name__ : Union[str, Any] = value
    elif weight_type == "weight_g":
        __magic_name__ : Optional[int] = value
    elif weight_type == "weight_v":
        __magic_name__ : Union[str, Any] = value
    elif weight_type == "bias":
        __magic_name__ : Any = value
    else:
        __magic_name__ : Optional[Any] = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCamelCase ( _A, _A ):
    """Copy every fairseq parameter into the HF WavLM model via ``MAPPING``.

    NOTE(review): both parameters are named ``_A`` (a SyntaxError as
    written — upstream ``fairseq_model, hf_model``), locals are bound to the
    placeholder ``__magic_name__``, and the body reads the undefined
    ``fairseq_dict`` / ``is_used`` / ``mapped_key`` / ``unused_weights`` —
    restore from the upstream conversion script before use.
    """
    __magic_name__ : List[str] = []
    __magic_name__ : str = fairseq_model.state_dict()
    __magic_name__ : Dict = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        __magic_name__ : Any = False
        if "conv_layers" in name:
            # Feature-extractor convolutions get their own loader.
            load_conv_layer(
                _A, _A, _A, _A, hf_model.config.feat_extract_norm == """group""", )
            __magic_name__ : str = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    __magic_name__ : List[str] = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        __magic_name__ : Tuple = name.split(_A )[0].split(""".""" )[-2]
                        __magic_name__ : Union[str, Any] = mapped_key.replace("""*""", _A )
                    if "weight_g" in name:
                        __magic_name__ : Optional[int] = """weight_g"""
                    elif "weight_v" in name:
                        __magic_name__ : Union[str, Any] = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        __magic_name__ : Optional[Any] = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __magic_name__ : int = """weight"""
                    else:
                        __magic_name__ : Optional[Any] = None
                    set_recursively(_A, _A, _A, _A, _A )
                continue
        if not is_used:
            unused_weights.append(_A )
    logger.warning(f'Unused weights: {unused_weights}' )
def UpperCamelCase ( _A, _A, _A, _A, _A ):
    """Copy one feature-extractor conv-layer parameter into the HF model.

    NOTE(review): five parameters all named ``_A`` (a SyntaxError as
    written — upstream ``full_name, value, feature_extractor,
    unused_weights, use_group_norm``); locals are bound to the placeholder
    ``__magic_name__`` while the body reads ``name`` / ``layer_id`` /
    ``type_id``.  Restore from the upstream conversion script before use.
    """
    __magic_name__ : Dict = full_name.split("""conv_layers.""" )[-1]
    __magic_name__ : Tuple = name.split(""".""" )
    __magic_name__ : str = int(items[0] )
    __magic_name__ : List[Any] = int(items[1] )
    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            __magic_name__ : Optional[int] = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            __magic_name__ : Any = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the layer/group norm attached to the convolution.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            __magic_name__ : Any = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            __magic_name__ : Optional[int] = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(_A )
@torch.no_grad()
def UpperCamelCase ( _A, _A, _A=None ):
    """Convert a fairseq WavLM checkpoint into an HF ``WavLMModel`` and save it.

    NOTE(review): three parameters collapsed to ``_A`` (a SyntaxError as
    written — upstream ``checkpoint_path, pytorch_dump_folder_path,
    config_path=None``); locals bound to the placeholder ``__magic_name__``
    while the body reads ``checkpoint`` / ``model`` / ``config`` /
    ``hf_wavlm``.  Restore from the upstream conversion script before use.
    """
    __magic_name__ : Optional[int] = torch.load(_A )
    __magic_name__ : Union[str, Any] = WavLMConfigOrig(checkpoint["""cfg"""] )
    __magic_name__ : Union[str, Any] = WavLMOrig(_A )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()
    if config_path is not None:
        __magic_name__ : Any = WavLMConfig.from_pretrained(_A )
    else:
        __magic_name__ : str = WavLMConfig()
    __magic_name__ : List[str] = WavLMModel(_A )
    recursively_load_weights(_A, _A )
    hf_wavlm.save_pretrained(_A )
if __name__ == "__main__":
    # CLI entry point for the WavLM checkpoint conversion.
    # NOTE(review): the parser is bound to the placeholder `__magic_name__`
    # (twice), so `parser`, `args` and `convert_wavlm_checkpoint` (the
    # function above was renamed to `UpperCamelCase`) are undefined as
    # written — automated-rename artifacts.
    __magic_name__: Optional[int] = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    __magic_name__: List[str] = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 324 |
# Package version string.
# NOTE(review): this was annotated `int` for a str value (fixed below), and
# upstream this constant is `__version__` — `__magic_name__` is a rename
# artifact.
__magic_name__: str = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 324 | 1 |
import re
import string
import numpy as np
import datasets
# NOTE(fix): all three constants were bound to the same name `_lowerCamelCase`
# (each assignment shadowing the previous), while the metric class below
# references `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` — a
# NameError at import time. Restored the names the class actually uses.
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'

# Doc fix: the `ignore_numbers` entry previously said "removes all punctuation".
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'

# No formal citation for this metric; upstream keeps a newline placeholder.
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __snake_case(datasets.Metric):
    """Exact-match metric: percentage of predictions identical to their references.

    Fixes over the previous revision:
      * both methods were named ``SCREAMING_SNAKE_CASE`` (the second silently
        shadowed the first) — restored the ``_info``/``_compute`` names the
        ``datasets.Metric`` base class actually dispatches to;
      * ``_compute`` declared four parameters all named ``_UpperCAmelCase``
        (a SyntaxError) while its body read ``predictions``/``references``/
        ``regexes_to_ignore``/... — parameters renamed to match the body.
    """

    def _info(self):
        """Declare the metric's string-typed inputs and reference material."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with the rate in [0.0, 100.0].

        Optional normalizations (regex stripping, case folding, punctuation
        and digit removal) are applied to both sides before comparison.
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from predictions and references alike.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 703 |
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [int(UpperCamelCase_ ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(UpperCamelCase_ ) == 4 and all(0 <= int(UpperCamelCase_ ) <= 254 for octet in octets )
if __name__ == "__main__":
    # Read one address from stdin and report its validity.
    ip = input().strip()
    # Bug fix: the original called the undefined name `is_ip_va_address_valid`
    # and read the undefined variable `ip` (results were bound to the
    # throwaway name `_lowerCamelCase`). The validator above is `_UpperCAmelCase`.
    valid_or_invalid = "valid" if _UpperCAmelCase(ip) else "invalid"
    print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 196 | 0 |
'''simple docstring'''
from math import factorial
def a__ ( lowercase : int = 100 ) -> int:
    """Return the sum of the decimal digits of ``lowercase!`` (Project Euler 20).

    Bug fix: the original evaluated ``map(lowercase, str(factorial(lowercase)))``,
    mapping the *integer argument* over the digit characters — a TypeError
    ('int' object is not callable). Each digit character must be mapped with
    ``int`` before summing.
    """
    return sum(map(int, str(factorial(lowercase))))
if __name__ == "__main__":
    # Bug fix: the original called the undefined name `solution`; the function
    # defined above is `a__`.
    print(a__(int(input('Enter the Number: ').strip())))
| 98 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# NOTE(fix): the three constants were all bound to `lowercase__` (each
# assignment shadowing the previous), while the decorator and `_info` below
# reference `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` — a
# NameError at import time. Restored the referenced names; literals unchanged.
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """Accuracy metric for the MATH dataset.

    Predictions and references are compared with ``math_equivalence.is_equiv``,
    which canonicalizes LaTeX (e.g. "1/2" vs "\\frac{1}{2}") before comparing.

    Fixes over the previous revision: both methods were named ``snake_case__``
    (the second shadowed the first), and ``_compute`` declared two parameters
    with the same name ``lowerCAmelCase__`` (a SyntaxError). Restored the
    ``_info``/``_compute`` names the ``datasets.Metric`` base class calls.
    """

    def _info(self):
        """Describe the metric: string inputs, citation and codebase links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions equivalent to their references."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
class UpperCAmelCase__:
    """A rectangular matrix of ints/floats with basic linear algebra.

    Supports determinant (Laplace expansion), minors/cofactors, adjugate,
    inverse, the operators ``+ - * ** ==`` and in-place row/column insertion.

    Fixes over the previous revision: every method was named ``A_`` (each
    definition shadowing the previous), several methods declared duplicate
    parameter names (a SyntaxError), ``self.rows`` was never assigned (results
    were bound to a local ``A``), and bodies referenced the undefined names
    ``Matrix``/``error``/``snake_case``.
    """

    def __init__(self, rows: list[list[int]]) -> None:
        """Validate *rows* (rectangular, numeric, non-empty rows) and store them."""
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.'
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the columns of the matrix (i.e. the transpose, as lists)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(number of rows, number of columns)."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> UpperCAmelCase__:
        """Return the identity matrix with the same number of rows."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return UpperCAmelCase__(values)

    def determinant(self) -> int:
        """Determinant via cofactor expansion along the first row.

        Returns 0 for non-square matrices and 1 for the empty matrix.
        """
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self) -> bool:
        # A square matrix is invertible iff its determinant is non-zero.
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with *row* and *column* removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return UpperCAmelCase__(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor: (-1)**(row + column) * minor(row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> UpperCAmelCase__:
        """Matrix of minors."""
        return UpperCAmelCase__(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> UpperCAmelCase__:
        """Matrix of cofactors (minors with a checkerboard of signs)."""
        return UpperCAmelCase__(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> UpperCAmelCase__:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return UpperCAmelCase__(values)

    def inverse(self) -> UpperCAmelCase__:
        """Inverse via adjugate / determinant.

        NOTE: scalar multiplication truncates each element with int(), so the
        result is only exact when the true inverse has integer entries.
        """
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # Bug fix: the original joined the characters of str(self.rows[0]),
            # producing garbage; format the single row like the multi-row case.
            return "[[" + ". ".join([str(value) for value in self.rows[0]]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append *row*, or insert it before index *position* when given."""
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            # Bug fix: the result was assigned to a local instead of self.rows.
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append *column*, or insert it before index *position* when given."""
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        # Bug fix: both branches bound the result to a local instead of self.rows.
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, UpperCAmelCase__):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> UpperCAmelCase__:
        return self * -1

    def __add__(self, other: UpperCAmelCase__) -> UpperCAmelCase__:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return UpperCAmelCase__(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: UpperCAmelCase__) -> UpperCAmelCase__:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return UpperCAmelCase__(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: UpperCAmelCase__ | int | float) -> UpperCAmelCase__:
        if isinstance(other, (int, float)):
            # NOTE: int() truncation on scalar multiplication is inherited from
            # the original implementation (inverse() depends on it).
            return UpperCAmelCase__(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, UpperCAmelCase__):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return UpperCAmelCase__(
                [
                    [UpperCAmelCase__.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int) -> UpperCAmelCase__:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        """Dot product of a row vector and a column vector (equal length)."""
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 704 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Bug fix: the logger was bound to the throwaway name `A`, leaving every
# later `logger.…` call in this script a NameError.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def lowerCAmelCase__(wav, max_length, sample_rate=16000):
    """Randomly crop *wav* to at most *max_length* seconds.

    Bug fix: the original declared three parameters all named
    ``lowerCamelCase__`` (a SyntaxError — duplicate argument names) while the
    body read ``wav``/``max_length``/``sample_rate``; the signature now matches
    the body.

    Args:
        wav: 1-D audio sequence (anything sliceable with a length).
        max_length: maximum clip length in seconds.
        sample_rate: samples per second of *wav* (default 16 kHz).

    Returns:
        *wav* unchanged when it is already short enough, otherwise a
        uniformly random contiguous chunk of ``round(sample_rate * max_length)``
        samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase__:
    """Arguments pertaining to what data we use for training and evaluation.

    Fixes over the previous revision: every field was named ``lowerCAmelCase_``
    (each annotation shadowing the previous, leaving a single usable field)
    and optional defaults referenced the undefined name ``UpperCamelCase``
    (NameError at import time). Field names are restored from their help
    strings; the undefined defaults become ``None``.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label",
        metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class UpperCAmelCase__:
    """Arguments pertaining to which model/config/feature extractor to use.

    Fixes over the previous revision: every field was named ``lowerCAmelCase_``
    (self-shadowing), defaults referenced the undefined name ``UpperCamelCase``,
    the validator was named ``A_`` (so it was never invoked — restored as the
    dataclass hook ``__post_init__``), and ``warnings.warn`` was passed the
    undefined category ``snake_case`` (now ``FutureWarning``).
    Field names are restored from their help strings; boolean defaults are
    chosen to match the help-string semantics — confirm against the upstream
    example script if exact CLI defaults matter.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        """Map the deprecated `freeze_feature_extractor` flag onto the new one."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.',
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`.'
                'Only make use of `--freeze_feature_encoder`.')
def lowerCAmelCase__ ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
A = DatasetDict()
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A = feature_extractor.model_input_names[0]
def train_transforms(lowerCamelCase__ ):
A = []
for audio in batch[data_args.audio_column_name]:
A = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCamelCase__ )
A = feature_extractor(lowerCamelCase__ , sampling_rate=feature_extractor.sampling_rate )
A = {model_input_name: inputs.get(lowerCamelCase__ )}
A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCamelCase__ ):
A = [audio['array'] for audio in batch[data_args.audio_column_name]]
A = feature_extractor(lowerCamelCase__ , sampling_rate=feature_extractor.sampling_rate )
A = {model_input_name: inputs.get(lowerCamelCase__ )}
A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A = raw_datasets['train'].features[data_args.label_column_name].names
A , A = {}, {}
for i, label in enumerate(lowerCamelCase__ ):
A = str(lowerCamelCase__ )
A = label
# Load the accuracy metric from the datasets package
A = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(eval_pred):
    """Compute accuracy for an ``EvalPrediction`` (predictions are per-class logits).

    Fixes the mangled signature: the body reads ``eval_pred``, so that must be
    the parameter name.
    """
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)
A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCamelCase__ ) , labelaid=lowerCamelCase__ , idalabel=lowerCamelCase__ , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCamelCase__ , output_all_columns=lowerCamelCase__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCamelCase__ , output_all_columns=lowerCamelCase__ )
# Initialize our trainer
A = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , )
# Training
if training_args.do_train:
A = None
if training_args.resume_from_checkpoint is not None:
A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A = last_checkpoint
A = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A = trainer.evaluate()
trainer.log_metrics('eval' , lowerCamelCase__ )
trainer.save_metrics('eval' , lowerCamelCase__ )
# Write model card and (optionally) push to hub
A = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
# Script entry point: run the audio-classification training pipeline defined above.
if __name__ == "__main__":
    main()
| 109 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class snake_case_:
    """Mixin verifying that a feature extractor's config (de)serializes correctly.

    Concrete test classes must set ``feature_extraction_class`` and provide a
    ``feat_extract_dict`` of constructor kwargs; ``assertEqual``/``assertIsNotNone``
    come from the ``unittest.TestCase`` the mixin is combined with.

    Fixes: all four methods were named identically, so only the last definition
    survived and three checks silently never ran; several locals were read but
    never assigned (``obj``, ``value``, the temp-dir path).
    """

    # Overridden by concrete test classes with the extractor class under test.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """to_json_string() must reproduce every configured attribute."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """Round-trip through to_json_file / from_json_file preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """Round-trip through save_pretrained / from_pretrained preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor must be constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img, pts_from, pts_to, rows, cols):
    """Return ``img`` warped by the affine transform mapping ``pts_from`` onto ``pts_to``.

    ``pts_from``/``pts_to`` are 3x2 float32 arrays of corresponding points
    (cv2.getAffineTransform requires exactly three correspondences);
    ``rows``/``cols`` give the output image size.

    Fixes: all five parameters were named ``_snake_case`` (a SyntaxError), the
    body read undefined ``rows``/``cols``, and the callers below invoke
    ``get_rotation``, so that must be the function's name.
    """
    matrix = cva.getAffineTransform(pts_from, pts_to)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape (drives the warp output size)
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    # (cv2.getAffineTransform requires float32 point arrays; the source's
    # nonexistent `np.floataa` is restored to np.float32)
    ptsa = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    ptsb = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    ptsc = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    ptsd = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE(review): the source's point pairings were garbled (all four arrays
    # shared one name); these pairings follow declaration order — confirm the
    # intended correspondences.
    images = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsb, img_rows, img_cols),
        get_rotation(gray_img, ptsb, ptsc, img_rows, img_cols),
        get_rotation(gray_img, ptsc, ptsd, img_rows, img_cols),
    ]
    # plot different image rotations in a 2x2 grid
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, rotated in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(rotated, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 223 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowercase = logging.get_logger(__name__)
class _snake_case(CLIPImageProcessor):
    """Deprecated alias for :class:`CLIPImageProcessor`.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like the image processor it subclasses.

    Fixes: the base class was the undefined name ``lowerCAmelCase_`` (the
    module imports ``CLIPImageProcessor`` specifically to subclass it), and
    ``warnings.warn`` was handed an undefined name where the warning
    category belongs.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__lowercase = logging.get_logger(__name__)
class _snake_case(ChineseCLIPImageProcessor):
    """Deprecated alias for :class:`ChineseCLIPImageProcessor`.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like the image processor it subclasses.

    Fixes: ``__init__`` declared two parameters with the same name
    (``*UpperCamelCase_`` and ``**UpperCamelCase_`` — a SyntaxError), the base
    class was the undefined name ``lowerCAmelCase_`` (the module imports
    ``ChineseCLIPImageProcessor`` specifically to subclass it), and the
    argument tuple was passed to ``warnings.warn`` where the warning category
    belongs.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 305 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.