import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # context-manager name reconstructed; the helper names come from the calls below
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
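# Usage sketch (illustrative addition, not part of the original module): run a
# short spinner with the cursor hidden; the cursor is restored even on error.
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for _ in range(10):
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(0.1)
    sys.stdout.write("\n")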
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the init arguments
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
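# Usage sketch (illustrative addition, not part of the original file; the module
# lives inside `transformers`, so it is shown as a doctest-style comment):
#
#     >>> from transformers import LxmertTokenizerFast
#     >>> tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     >>> enc = tokenizer("what animal is shown?", "a photo of a cat on a couch")
#     >>> enc["token_type_ids"]  # 0s for the first segment, 1s for the second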
from types import MethodType
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects image tensor in [0, 1], returns bit tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bit tensor in {-1, 1}, returns image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clamps the predicted x_0 to [-bit_scale, bit_scale]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clamps the predicted x_0 to [-bit_scale, bit_scale]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-clamped step. The functions above read
        # `self.bit_scale` with `self` bound to the scheduler, so the value is
        # mirrored there and the function is bound with MethodType (the original
        # community pipeline assigns the raw function).
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step, scheduler
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
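# Usage sketch (illustrative addition, not part of the original file). Assumes a
# UNet trained on bit representations; the checkpoint path is a placeholder.
#
#     >>> from diffusers import DDIMScheduler, UNet2DConditionModel
#     >>> unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical checkpoint
#     >>> scheduler = DDIMScheduler(num_train_timesteps=1000)
#     >>> pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#     >>> image = pipe(height=256, width=256, num_inference_steps=50).images[0]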
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
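# Usage sketch (illustrative addition, not part of the original file):
#
#     >>> from diffusers import DDIMPipeline
#     >>> pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     >>> image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]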
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE decoding: the input tensor is split into tiles and
        encoded/decoded in several steps to bound memory use."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Enable sliced VAE decoding: the batch is decoded one slice at a time."""
        self.use_slicing = True

    def disable_slicing(self):
        """Disable sliced VAE decoding."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
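# Usage sketch (illustrative addition, not part of the original file): tiled
# encoding keeps memory bounded for large inputs.
#
#     >>> import torch
#     >>> from diffusers import AutoencoderKL
#     >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     >>> vae.enable_tiling()
#     >>> x = torch.randn(1, 3, 1024, 1024)
#     >>> latents = vae.encode(x).latent_dist.sample()
#     >>> recon = vae.decode(latents).sample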
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
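# Usage sketch (illustrative addition, not part of the original file):
#
#     >>> import numpy as np
#     >>> from transformers import WhisperFeatureExtractor
#     >>> fe = WhisperFeatureExtractor()
#     >>> audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
#     >>> feats = fe(audio, sampling_rate=16000, return_tensors="np")
#     >>> feats["input_features"].shape  # (1, 80, 3000): 80 mel bins x 30 s of frames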
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging

logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
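# Sanity-check sketch for `cosine_distance` (illustrative addition; the relative
# imports above mean this file only runs inside the diffusers package):
#
#     >>> emb = torch.randn(2, 8)
#     >>> torch.diagonal(cosine_distance(emb, emb))  # identical inputs -> ~1.0 similarity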
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in a 32 bit integer using Brian Kernighan's way.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
    0
    >>> get_set_bits_count_using_brian_kernighans_algorithm(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be negative
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in a 32 bit integer using the modulo operator.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(0)
    0
    >>> get_set_bits_count_using_modulo_operator(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be negative
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """
    Benchmark code for comparing the two functions, with different int values.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        # the timed statement is parametrized with `number` (the original hardcoded 25)
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
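# Worked example (illustrative comment, not from the original file): for
# 25 = 0b11001, Kernighan's update `number &= number - 1` clears the lowest
# set bit on each pass:
#     0b11001 -> 0b11000 -> 0b10000 -> 0b00000
# three iterations, so popcount(25) == 3.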
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_leading_space(self):
        # method name reconstructed from the test body
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer
        # saved in the pre-v4.17.0 format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters, so the mixin test does not apply
        pass
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
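# Quick sanity check (illustrative addition, not part of the original module):
if __name__ == "__main__":
    assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
    assert snakecase_to_camelcase("some_dataset_name") == "SomeDatasetName"
    # shard filenames follow the "<prefix>-<shard>-of-<num_shards>.<suffix>" pattern
    print(filenames_for_dataset_split("data", "SquadV2", "train", "arrow", shard_lengths=[100, 100]))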
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __a (tf.keras.optimizers.schedules.LearningRateSchedule):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 1.0 , _a = None , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = initial_learning_rate
SCREAMING_SNAKE_CASE__ : Tuple = warmup_steps
SCREAMING_SNAKE_CASE__ : Optional[Any] = power
SCREAMING_SNAKE_CASE__ : Optional[Any] = decay_schedule_fn
SCREAMING_SNAKE_CASE__ : Any = name
def __call__( self , _a ) -> List[Any]:
"""simple docstring"""
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(_a , tf.floataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
SCREAMING_SNAKE_CASE__ : str = global_step_float / warmup_steps_float
SCREAMING_SNAKE_CASE__ : Optional[int] = self.initial_learning_rate * tf.math.pow(_a , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_a , )
def _a ( self ) -> List[Any]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.999 , __lowerCAmelCase = 1E-8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , )
if num_warmup_steps:
SCREAMING_SNAKE_CASE__ : Dict = WarmUp(
initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , )
if weight_decay_rate > 0.0:
SCREAMING_SNAKE_CASE__ : int = AdamWeightDecay(
learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__lowerCAmelCase , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.keras.optimizers.Adam(
learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a = 0.001 , _a = 0.9 , _a = 0.999 , _a = 1E-7 , _a = False , _a = 0.0 , _a = None , _a = None , _a = "AdamWeightDecay" , **_a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a , _a , _a , _a , _a , _a , **_a )
SCREAMING_SNAKE_CASE__ : Tuple = weight_decay_rate
SCREAMING_SNAKE_CASE__ : Tuple = include_in_weight_decay
SCREAMING_SNAKE_CASE__ : Dict = exclude_from_weight_decay
@classmethod
def _a ( cls , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"""WarmUp""": WarmUp}
return super(_a , cls ).from_config(_a , custom_objects=_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
super(_a , self )._prepare_local(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _a ( self , _a , _a=None , **_a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = list(zip(*_a ) )
return super(_a , self ).apply_gradients(zip(_a , _a ) , name=_a , **_a )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE__ : Dict = apply_state or {}
SCREAMING_SNAKE_CASE__ : List[str] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._fallback_apply_state(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _a ( self , _a , _a , _a=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Any = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_dense(_a , _a , **_a )
def _a ( self , _a , _a , _a , _a=None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , _a )
SCREAMING_SNAKE_CASE__ : Dict = self._decay_weights_op(_a , _a , _a )
with tf.control_dependencies([decay] ):
return super(_a , self )._resource_apply_sparse(_a , _a , _a , **_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_a , _a ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_a , _a ) is not None:
return False
return True
class GradientAccumulator:
    """Gradient accumulation utility: sums gradients across steps on each replica."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
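
# ---------------------------------------------------------------------------
# Added usage sketch (illustrative, not part of the original module): run
# several micro-batches through the accumulator, then apply the mean gradient
# once. `model`, `optimizer`, `micro_batches` and `loss_fn` are assumptions.
# ---------------------------------------------------------------------------
def _demo_gradient_accumulation(model, optimizer, micro_batches, loss_fn):
    accumulator = GradientAccumulator()
    for features, labels in micro_batches:
        with tf.GradientTape() as tape:
            loss = loss_fn(labels, model(features))
        accumulator(tape.gradient(loss, model.trainable_variables))
    # The accumulator stores sums, so divide by the number of steps before applying.
    steps = tf.cast(accumulator.step, tf.float32)
    gradients = [None if g is None else g / steps for g in accumulator.gradients]
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    accumulator.reset()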
| 56
| 0
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
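
# ---------------------------------------------------------------------------
# Added sanity-check sketch (illustrative, not part of the original script):
# the permutation above only reorders elements, so the shape is preserved.
# ---------------------------------------------------------------------------
def _demo_fix_query_key_value_ordering():
    num_heads, hidden_size, num_splits = 4, 8, 3
    param = torch.arange(num_splits * num_heads * hidden_size * 16, dtype=torch.float32).view(
        num_splits * num_heads * hidden_size, 16
    )
    out = fix_query_key_value_ordering(
        param, checkpoint_version=2.0, num_splits=num_splits, num_heads=num_heads, hidden_size=hidden_size
    )
    assert out.shape == param.shape  # same elements, different layout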
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
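
# ---------------------------------------------------------------------------
# Added usage note (illustrative; paths are placeholders): following the
# PYTHONPATH hint at the top of this file, a typical invocation looks like
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint.zip
#
# which writes config.json, the tokenizer files and pytorch_model.bin next to
# the input checkpoint.
# ---------------------------------------------------------------------------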
| 264
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names into transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
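
# ---------------------------------------------------------------------------
# Added sanity checks (illustrative, not part of the original script) for the
# two helpers above: bit-width parsing and transformer-block renaming. The
# file name below is an assumption shaped like a Megatron-DeepSpeed shard.
# ---------------------------------------------------------------------------
def _demo_helpers():
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.float32) == 4
    # Layer files are offset by 3 (embedding layers come first), so "layer_04"
    # maps onto transformers block "h.1".
    assert (
        layer_name_mapping("mlp.dense_4h_to_h.weight", "layer_04-model_00-model_states.pt")
        == "h.1.mlp.dense_4h_to_h.weight"
    )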
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 264
| 1
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative Depth-First Search on a graph given as an adjacency dict.

    :param graph: directed graph in dictionary format
    :param start: starting vertex
    :returns: the set of vertices reachable from `start`

    >>> input_G = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}
    >>> sorted(depth_first_search(input_G, "A"))
    ['A', 'B', 'C']
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 360
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
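
# ---------------------------------------------------------------------------
# Added usage sketch (illustrative; the checkpoint name and the local image
# file are assumptions, not part of the original module):
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(
#       text=["a photo of a cat", "a photo of a dog"],
#       images=Image.open("cat.png"),
#       return_tensors="pt",
#       padding=True,
#   )
#   # `inputs` now holds input_ids, attention_mask and pixel_values.
# ---------------------------------------------------------------------------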
| 26
| 0
|
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y).

    :param f: right-hand side f(x, y)
    :param y0: initial value y(x0)
    :param x0: start of the interval
    :param h: step size
    :param x_end: end of the interval
    :returns: array of solution values with y[0] == y0
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
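
# ---------------------------------------------------------------------------
# Added usage sketch (illustrative, not part of the original file): integrate
# dy/dx = y with y(0) = 1, whose exact solution is e^x, and check the error.
# ---------------------------------------------------------------------------
def _demo_runge_kutta() -> None:
    y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)
    # RK4 has O(h**4) global error, so the endpoint should sit very close to e**3.
    assert np.isclose(y[-1], np.exp(3.0), rtol=1e-6)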
| 103
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 103
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
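
# ---------------------------------------------------------------------------
# Added usage sketch (illustrative; the dataset name is an assumption): run
# near-deduplication end to end on a dataset that has "content", "repo_name"
# and "path" columns.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("my-org/my-code-dataset", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   ds_dedup.save_to_disk("code-dataset-dedup")
# ---------------------------------------------------------------------------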
| 315
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Params (the original constants were left empty in this copy; fill in paths as needed)
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main():
    """
    Get images list and annotations list from the input dir, flip both, and
    save the new images and annotations in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list, anno_list, flip_type=1):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char=32):
    """Automatically generate a random string of `number_char` characters."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 315
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
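
# ---------------------------------------------------------------------------
# Added sketch (illustrative; a stripped-down stand-in for transformers'
# `_LazyModule`, not its real implementation): attribute access triggers the
# submodule import, so importing the package stays cheap until a symbol such
# as `InstructBlipProcessor` is first used.
# ---------------------------------------------------------------------------
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the owning submodule on first access and forward the lookup.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)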
| 188
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
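
# ---------------------------------------------------------------------------
# Added sketch (illustrative, not the model's actual implementation) of the
# two `rope_scaling` strategies exercised above. Linear scaling always divides
# position indices by the factor, so even short inputs change; dynamic NTK
# scaling only kicks in past the trained maximum length, which is why the
# short-input outputs match the unscaled model in the "dynamic" branch.
# ---------------------------------------------------------------------------
def _linear_scaled_positions(seq_len, factor):
    # Positions are compressed so that `factor * original_max` tokens fit in
    # the position range the model was trained on.
    return torch.arange(seq_len, dtype=torch.float32) / factor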
| 188
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Translate deprecated --no_* flags into their positive counterparts
        # and warn the user once per deprecated flag.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
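# Illustration of the deprecated-flag translation in __init__ above (a sketch, not
# part of the original file; the `models` field is assumed to come from the parent
# BenchmarkArguments dataclass): both calls are equivalent, and the first logs a
# deprecation warning.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], cuda=False)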
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError
import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an Open Library olid such as 'isbn/0140328726', return the book data as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a summary dict with human-readable keys."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
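# Example of the olid validation above (the failure case needs no network; the
# success line is illustrative and performs a real HTTP request):
#
#   get_openlibrary_data("isbn/0140328726")  # -> dict of book data for that ISBN
#   get_openlibrary_data("not-an-olid")      # -> raises ValueError (no single '/')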
"""Compute the equivalent resistance of resistors connected in parallel or in series."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
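# Worked examples for the two functions above (values verified by hand):
#
#   resistor_parallel([2, 2])  # 1 / (1/2 + 1/2) == 1.0
#   resistor_series([2, 3])    # 2 + 3 == 5.0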
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse unknown '--key value' argument pairs into a dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
import math
def main():
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Read the message off in columns of width `key` (columnar transposition)."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by writing the ciphertext back into the grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
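# Worked example for the columnar transposition above (computed by hand): with
# key=8, "Common sense is not so common." is read off column by column as
#
#   encrypt_message(8, "Common sense is not so common.")
#   # -> 'Cenoonommstmme oo snnio. s s c'
#
# and decrypt_message(8, ...) on that ciphertext restores the original message.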
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
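# Small worked example of the key-renaming helper above (verified by hand):
#
#   rreplace("blocks.0.res_path.w", ".w", ".weight", 1)  # -> 'blocks.0.res_path.weight'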
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """Greedy fractional knapsack: maximize profit within the given weight capacity."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
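# Worked example (computed by hand): profits [1, 2, 3], weights [3, 4, 5],
# capacity 15. All three items fit whole (3 + 4 + 5 = 12 <= 15), so every
# profit is collected in full:
#
#   calc_profit([1, 2, 3], [3, 4, 5], 15)  # -> 6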
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write the updated schema through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of nums with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
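# Worked example (computed by hand): picking 5, 7 and 6 skips all adjacencies:
#
#   maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])  # -> 18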
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] = None, size: int = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
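# Usage sketch for the Fenwick tree above (prefix sums are half-open, [0, right);
# values verified by hand):
#
#   f = FenwickTree([1, 2, 3, 4, 5])
#   f.prefix(3)    # 1 + 2 + 3 == 6
#   f.query(1, 4)  # 2 + 3 + 4 == 9
#   f.add(2, 10)   # logical array becomes [1, 2, 13, 4, 5]
#   f.prefix(3)    # now 16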
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon.in search results for `product` into a pandas DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}, ), soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}), ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out inconsistent rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "Discount"
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
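# Worked examples: 3 and 5 are both prime; 4 is not prime at all:
#
#   twin_prime(3)  # -> 5
#   twin_prime(4)  # -> -1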
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the arc length for a central angle (in degrees) and radius."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
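# Worked example: a 90 degree arc of a circle with radius 10 is a quarter of the
# circumference, 2 * pi * 10 / 4 = 5 * pi:
#
#   arc_length(90, 10)  # -> 15.707963267948966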
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence, leaving non-letters unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
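# Worked examples for capitalize above:
#
#   capitalize("hello world")  # -> 'Hello world'
#   capitalize("123 hello")    # -> '123 hello'  (non-letters are left unchanged)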
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
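# A sketch of how this example is typically launched (the script filename is an
# assumption; the flag names are defined in main() above):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1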
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
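# Worked example: exactly one of the three quantities is zero, and the function
# solves for it; here area = tangential_force / stress = 100 / 25:
#
#   shear_stress(stress=25, tangential_force=100, area=0)  # -> ('area', 4.0)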
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
)
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
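# A hedged sketch (as comments, since executing imports inside this __init__
# would defeat the point) of what the lazy structure above buys a consumer of
# the installed `transformers` package: modules listed in `_import_structure`
# are only imported when one of their attributes is first accessed.
#
#     from transformers import BlenderbotSmallConfig   # cheap: touches the config module only
#     from transformers import BlenderbotSmallModel    # first touch triggers the torch-backed import
#     config = BlenderbotSmallConfig()
#     model = BlenderbotSmallModel(config)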
| 242
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
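# Hedged usage sketch, mirroring the argparse definition above (the script
# file name is whatever this file is saved as; the flags come from the parser):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json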
| 26
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory.")
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase_ = dataset["""train"""].features["""labels"""].names
lowercase_ = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
lowercase_ = str(lowerCAmelCase__ )
lowercase_ = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase_ = image_processor.size["""shortest_edge"""]
else:
lowercase_ = (image_processor.size["""height"""], image_processor.size["""width"""])
lowercase_ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase_ = Compose(
[
RandomResizedCrop(lowerCAmelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase_ = Compose(
[
Resize(lowerCAmelCase__ ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCamelCase: List[str] ):
lowercase_ = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(__lowerCamelCase: Optional[Any] ):
lowercase_ = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
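# Hedged usage sketch (the 'beans' dataset name is an assumption; any hub
# dataset or image folder with an image/labels schema should work):
#
#     python run_image_classification.py \
#         --dataset_name beans \
#         --output_dir ./vit-base-beans \
#         --do_train --do_eval \
#         --remove_unused_columns False
#
# `--remove_unused_columns False` keeps the raw image column around so the
# torchvision transforms defined in main() can see it.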
| 366
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
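# A minimal standalone sketch (not part of the test suite) of the API these
# tests exercise: `Dataset.from_list` infers the schema from the first record.
if __name__ == "__main__":
    demo = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    print(demo.column_names)  # ['col_1', 'col_2']
    print(demo[0])  # {'col_1': 3, 'col_2': 'a'}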
| 297
| 0
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }" )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return empty function."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
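# A hedged usage sketch (assuming this module lives in its usual
# `datasets.utils.logging` location): library users tune verbosity and
# progress bars through the helpers defined above.
if __name__ == "__main__":
    set_verbosity_info()
    logger = get_logger(__name__)
    logger.info("effective level: %s", get_verbosity())
    disable_progress_bar()
    print("progress bars enabled:", is_progress_bar_enabled())  # False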
| 272
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
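# Hedged usage sketch via python-fire (flag names map onto `minify`'s
# parameters; the script file name here is hypothetical):
#
#     python minify_dataset.py --src_dir wmt_en_ro --dest_dir wmt_en_ro_mini --n 128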
| 231
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
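# A hedged usage sketch (illustrative only: inside the transformers package the
# class is imported as `transformers.ElectraTokenizerFast`, and the
# 'google/electra-small-discriminator' checkpoint must be downloadable):
#
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     enc = tok("first segment", "second segment")
#     # token_type_ids are 0 for [CLS] + first segment + [SEP] and 1 for the rest,
#     # exactly as computed by create_token_type_ids_from_sequences above.
#     print(enc["input_ids"], enc["token_type_ids"])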
| 95
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are merged, giving the 25-letter alphabet of the 5x5 Playfair table
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
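if __name__ == "__main__":
    # A small round-trip sketch. Note that prepare_input() upper-cases the
    # text, drops non-letters, and pads repeated letters / odd lengths with
    # "X", so decode() returns the padded form rather than the original text.
    key = "playfair example"
    secret = encode("Hide the gold in the tree stump", key)
    print(f"encoded: {secret}")
    print(f"decoded: {decode(secret, key)}")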
| 95
| 1
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up DP matcher for patterns with '.' (any char) and '*' (zero or more of the preceding char)."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase : Any = "aab"
_UpperCamelCase : int = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
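# A few more hedged examples of the matcher's semantics (standard "." and "*"
# regex rules, where "*" means zero or more of the preceding element):
#
#     match_pattern("aa", "a")                    -> False
#     match_pattern("aa", "a*")                   -> True
#     match_pattern("ab", ".*")                   -> True
#     match_pattern("mississippi", "mis*is*p*.")  -> False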
| 77
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop('add_bos_token', False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
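# A hedged usage sketch (illustrative only: inside the transformers package
# the class is imported as `transformers.GPT2TokenizerFast`, and the 'gpt2'
# checkpoint must be downloadable). The assertion in `_batch_encode_plus`
# above means pretokenized input requires `add_prefix_space=True`:
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     print(tok(["Hello", "world"], is_split_into_words=True)["input_ids"])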
| 316
| 0
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f'''transformer.h.{layer_idx}'''
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",)
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",)
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''')
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,)
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''')
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f'''Adding {tokenizer_class} tokenizer files''')
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
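# A hedged sanity-check sketch (shapes only, random data): for checkpoint
# version >= 2.0, fix_query_key_value_ordering() reshuffles the fused QKV
# layout while preserving the overall [3*D, :] shape. With 4 heads of size 8
# (hidden size D = 32):
#
#     param = torch.randn(3 * 32, 32)
#     out = fix_query_key_value_ordering(param, 2.0, 3, 4, 8)
#     assert out.shape == param.shape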
| 356
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 343
| 0
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision='9b8c223')
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            _ = cached_file('tiny-random-bert', CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision='aaaa')
        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')
        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, '.no_exist', main_commit, 'conf')))
        path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, 'conf', local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only', WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', TF2_WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased', 'ahah.txt'))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            get_file_from_repo('bert-base-case', CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            get_file_from_repo('bert-base-cased', CONFIG_NAME, revision='ahaha')
        resolved_file = get_file_from_repo('bert-base-cased', CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, 'r').read())
        self.assertEqual(config['hidden_size'], 768)
    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , 'a.txt' ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , 'b.txt' ) )
| 176
|
import os


def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # ord("A") is 65, so "A" scores 1

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
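    # Worked example from the problem statement (added illustration): COLIN is
    # worth 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name alphabetically, so
    # it contributes 938 * 53 = 49714 to the total.
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53
    assert 938 * 53 == 49714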
| 176
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single `complete` example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 247
|
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding strongly connected components in a directed
    graph given as an adjacency list. Runs in O(V + E) time.
    """
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
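    # Added sanity check (illustrative, not part of the original test): vertices
    # 0 and 1 form a 2-cycle and vertex 2 has a self-loop, so exactly two
    # strongly connected components should be reported.
    assert [[1, 0], [2]] == tarjan(create_graph(3, [(0, 1), (1, 0), (2, 2)]))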
| 247
| 1
|
'''simple docstring'''

from collections import deque
from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot is a deque so colliding values chain onto the same bucket.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 304
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
        images, maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # reduce the labels: the background (0) becomes 255 and other labels shift down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 304
| 1
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 21
|
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; returns True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
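

if __name__ == "__main__":
    # Minimal usage sketch (added illustration, not part of the original file):
    # three sets of sizes 1, 1 and 3; merging the two singletons yields a set of
    # size 2, while max_set keeps tracking the largest set seen so far (3).
    dsu = DisjointSet([1, 1, 3])
    assert dsu.merge(0, 1) is True
    assert dsu.merge(0, 1) is False  # already in the same set
    assert dsu.get_parent(0) == dsu.get_parent(1)
    assert dsu.max_set == 3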
| 21
| 1
|
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Initialize the tree from an array in O(N)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(N)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to index in O(lg N)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the value at index in O(lg N)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements in [0, right) in O(lg N)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements in the half-open range [left, right) in O(lg N)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index with prefix(index) <= value, or -1 if none, in O(lg N)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
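
    # Usage sketch (added illustration, not part of the original file): prefix(k)
    # sums the first k elements and query(l, r) sums the half-open range [l, r).
    f = FenwickTree([1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3
    assert f.query(1, 4) == 2 + 3 + 4
    f.add(0, 10)  # index 0 is stored separately in tree[0]
    assert f.prefix(3) == 11 + 2 + 3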
| 1
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place and returns None.
    start defaults to 0 and end defaults to len(sequence) - 1 if not given.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
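
    # Usage sketch (added illustration): slowsort mutates the list in place.
    data = [6, 2, 9, 4, 1]
    slowsort(data)
    assert data == [1, 2, 4, 6, 9]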
| 1
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "git_vision_model"
    def __init__(
        self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5,
        attention_dropout=0.0, initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
def _lowercase ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
lowerCAmelCase_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class GitConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "git"
    def __init__(
        self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True,
        tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
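

# Usage sketch (added illustration, not part of the original module). The nested
# vision config is created with defaults when none is passed; this assumes the
# standard transformers PretrainedConfig behavior:
#
#     config = GitConfig()
#     assert isinstance(config.vision_config, GitVisionConfig)
#     assert config.to_dict()["model_type"] == "git"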
| 14
|
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string (as UTF-8 bytes) with base85, per RFC 1924
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # decode the base85 bytes back to a string
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
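
    # Added round-trip check (illustrative, not part of the original script):
    # b85 encoding followed by decoding must reproduce the input exactly.
    assert base85_decode(base85_encode(test)) == test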
| 14
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the model's weights to our specification."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__A =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
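
    # Example invocation (illustrative only; the script name and paths below are
    # placeholders, not taken from this file):
    #   python convert_opt_checkpoint.py \
    #     --fairseq_path /path/to/opt/model.pt \
    #     --pytorch_dump_folder_path ./opt-hf \
    #     --hf_config facebook/opt-350m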
| 226
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 226
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :List[Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096,
        d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16,
        expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False,
        router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False,
        output_hidden_states=False, output_attentions=False, initializer_factor=0.002,
        output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995,
        eos_token_id=35999, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
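

# Usage sketch (added illustration, not part of the original module). The
# attribute_map above lets generic names resolve to the model-specific ones,
# assuming standard PretrainedConfig behavior:
#
#     config = GPTSanJapaneseConfig()
#     assert config.hidden_size == config.d_model  # resolved via attribute_map
#     assert config.num_layers == 10  # 10 switch layers + 0 extra layers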
| 131
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ : str = KandinskyVaaImgaImgPipeline
UpperCamelCase__ : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCamelCase__ : Dict = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCamelCase__ : Any = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase__ : List[Any] = False
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 32
@property
    def time_input_dim(self):
'''simple docstring'''
return 32
@property
    def block_out_channels_a(self):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim(self):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
'''simple docstring'''
return 100
@property
    def dummy_unet(self):
'''simple docstring'''
torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 131
| 1
|
def binary_or(a: int, b: int) -> str:
    """Take in 2 integers and return a binary string of their bitwise OR."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
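
    # Worked example (added illustration): 25 = 0b11001 and 32 = 0b100000, so
    # OR-ing the zero-padded binary strings gives 0b111001 (= 57 = 25 | 32).
    print(binary_or(25, 32))  # 0b111001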
| 156
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__lowerCAmelCase : str = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__lowerCAmelCase : Any = reader.read()
__lowerCAmelCase : int = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
__lowerCAmelCase : Any = UNetaDModel(**config)
else:
__lowerCAmelCase : List[str] = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
__lowerCAmelCase : str = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowerCAmelCase : Union[str, Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowerCAmelCase : Dict = config[key]
del config[key]
__lowerCAmelCase : int = [k.replace("UNetRes", "") for k in config["down_block_types"]]
__lowerCAmelCase : Optional[Any] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
__lowerCAmelCase : Any = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
__lowerCAmelCase : Tuple = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
__lowerCAmelCase : Dict = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
__lowerCAmelCase : Union[str, Any] = param_value
__lowerCAmelCase : str = True
if not has_changed:
__lowerCAmelCase : Union[str, Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
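# A standalone sketch of the key-renaming idea the loop above implements:
# remap the first dotted component of each parameter name through a lookup
# table (the function name and sample data below are illustrative, not part
# of this script):
def rename_state_dict(state_dict, key_map):
    # Remap the leading dotted component of each parameter name via key_map.
    renamed = {}
    for param_key, param_value in state_dict.items():
        head, dot, rest = param_key.partition(".")
        renamed[key_map.get(head, head) + dot + rest] = param_value
    return renamed

print(rename_state_dict({"downsample_blocks.0.conv.weight": 0.0}, {"downsample_blocks": "down_blocks"}))
# -> {'down_blocks.0.conv.weight': 0.0}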
from __future__ import annotations
from math import pi
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    '''
    Solve for whichever one of inductance (henries), frequency (hertz) or
    inductive reactance (ohms) is passed in as 0, using X_L = 2 * pi * f * L.

    >>> round(UpperCAmelCase__(10, 5, 0)['reactance'], 4)
    314.1593
    >>> UpperCAmelCase__(0, 0, 50)
    Traceback (most recent call last):
        ...
    ValueError: One and only one argument must be 0
    '''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a__ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Optional[Any] = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_flip_channel_order
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) -> Dict:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
return flip_channel_order(UpperCAmelCase__ , data_format=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ) -> PIL.Image.Image:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__SCREAMING_SNAKE_CASE = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = target_sizes.numpy()
__SCREAMING_SNAKE_CASE = []
for idx in range(len(UpperCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
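# A minimal usage sketch for an image processor of this shape. It mirrors the
# upstream MobileViT-style processor; the `transformers` class name below is
# an assumption, and the image shape is illustrative:
import numpy as np
from transformers import MobileViTImageProcessor  # assumed upstream class

processor = MobileViTImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 256, "width": 256})
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # H x W x C dummy image
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected (1, 3, 256, 256)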
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
A : Tuple = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def a__ ( __UpperCamelCase ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
A : int = parser.parse_args()
if args.check_lib:
A : List[str] = importlib.import_module("transformers")
A : Optional[int] = Path(transformers_module.__file__).parent
else:
A : Union[str, Any] = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( _snake_case , unittest.TestCase ):
lowercase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def snake_case_ ( self , UpperCamelCase__=0 ) -> Tuple:
'''simple docstring'''
A_ = np.random.RandomState(UpperCamelCase__ )
A_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs["""prompt"""]]
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs.pop("""prompt""" )]
A_ = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
A_ = text_inputs["""input_ids"""]
A_ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
A_ = prompt_embeds
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = 3 * ["""this is a negative prompt"""]
A_ = negative_prompt
A_ = 3 * [inputs["""prompt"""]]
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs.pop("""prompt""" )]
A_ = []
for p in [prompt, negative_prompt]:
A_ = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
A_ = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
A_ , A_ = embeds
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = ort.SessionOptions()
A_ = False
return options
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
A_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = 0
def test_callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
A_ = False
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """Andromeda galaxy in a bottle"""
A_ = np.random.RandomState(0 )
pipe(
prompt=UpperCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
A_ = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
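# The prompt-embedding tests above hinge on one property: passing precomputed
# `prompt_embeds` must produce the same image as passing the raw prompt. A
# schematic sketch of the precompute step those tests perform (pipeline API as
# used above; int32 input ids assumed for the ONNX text encoder):
import numpy as np

def precompute_prompt_embeds(pipe, prompts):
    tokens = pipe.tokenizer(
        prompts,
        padding="max_length",
        max_length=pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="np",
    )
    return pipe.text_encoder(input_ids=tokens["input_ids"].astype(np.int32))[0]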
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def A ( lowercase ) -> List[Any]:
'''simple docstring'''
if hor == 128:
UpperCamelCase = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
UpperCamelCase = (32, 128, 256)
UpperCamelCase = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
UpperCamelCase = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
UpperCamelCase = (32, 64, 128, 256)
UpperCamelCase = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
UpperCamelCase = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
UpperCamelCase = model.state_dict()
UpperCamelCase = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
UpperCamelCase = UNetaDModel(**lowercase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCamelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase = state_dict.pop(lowercase )
hf_value_function.load_state_dict(lowercase )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(lowercase , lowercase )
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
UpperCamelCase = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
UpperCamelCase = model
UpperCamelCase = UNetaDModel(**lowercase )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCamelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase = state_dict.pop(lowercase )
hf_value_function.load_state_dict(lowercase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(lowercase , lowercase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
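# Both converters above pair parameters positionally with
# dict(zip(source_keys, target_keys)), which only works because the two state
# dicts enumerate their parameters in the same order. The trick in miniature
# (tensors and names below are illustrative):
import torch

source = {"w1": torch.zeros(2), "b1": torch.ones(2)}
target_keys = ["linear.weight", "linear.bias"]
mapping = dict(zip(source.keys(), target_keys))
converted = {mapping[k]: v for k, v in source.items()}
print(sorted(converted))  # ['linear.bias', 'linear.weight']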
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , A_ = 768 , ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Parameter(torch.zeros(1 , A_ ) )
UpperCamelCase = nn.Parameter(torch.ones(1 , A_ ) )
def __UpperCamelCase ( self , A_ = None , A_ = None , ) -> Any:
"""simple docstring"""
UpperCamelCase = nn.Parameter(self.mean.to(A_ ).to(A_ ) )
UpperCamelCase = nn.Parameter(self.std.to(A_ ).to(A_ ) )
return self
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = (embeds * self.std) + self.mean
return embeds
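# The scale/unscale pair above is plain standardization with learned
# statistics; a minimal sketch of the round-trip property it guarantees
# (tensors below are illustrative):
import torch

mean, std = torch.zeros(1, 768), torch.ones(1, 768) * 2.0
embeds = torch.randn(4, 768)
scaled = (embeds - mean) / std
restored = scaled * std + mean
assert torch.allclose(embeds, restored)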
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str =tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_lowerCamelCase : str =DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : DatasetInfo ):
'''simple docstring'''
_lowerCamelCase : Tuple =str(SCREAMING_SNAKE_CASE__ )
dataset_info.write_to_directory(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[Any] =DatasetInfo.from_directory(SCREAMING_SNAKE_CASE__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'dataset_info.json' ) )
def a_ ( ):
'''simple docstring'''
_lowerCamelCase : Any =DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
_lowerCamelCase : List[str] =dataset_info._to_yaml_dict()
assert sorted(SCREAMING_SNAKE_CASE__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_lowerCamelCase : Optional[Any] =yaml.safe_dump(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : str =yaml.safe_load(SCREAMING_SNAKE_CASE__ )
assert dataset_info_yaml_dict == reloaded
def a_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple =DatasetInfo()
_lowerCamelCase : Optional[int] =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : DatasetInfosDict ):
'''simple docstring'''
_lowerCamelCase : Tuple =str(SCREAMING_SNAKE_CASE__ )
dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Tuple =DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__ )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCamelCase : List[Any] =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCamelCase : Dict =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'README.md' ) )
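# The round-trip these tests exercise, in isolation (assumes the public
# `datasets` API imported above; the temporary directory is illustrative):
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    DatasetInfo(description="demo", dataset_size=42).write_to_directory(tmp_dir)
    assert DatasetInfo.from_directory(tmp_dir).dataset_size == 42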
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCamelCase = True
from torch.cuda.amp import autocast
lowerCamelCase = logging.getLogger(__name__)
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class A :
UpperCamelCase__ : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase__ : Optional[str] =field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase__ : Optional[bool] =field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
UpperCamelCase__ : Optional[float] =field(
default=0.1 , metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
UpperCamelCase__ : Optional[float] =field(
        default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in the feature extractor.'} , )
UpperCamelCase__ : Optional[float] =field(
default=0.05 , metadata={
'help': (
            'Probability of each feature vector along the time axis to be chosen as the start of the vector '
            'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
            'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
UpperCamelCase__ : Optional[float] =field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class A :
UpperCamelCase__ : Optional[str] =field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase__ : Optional[str] =field(
default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''
} , )
UpperCamelCase__ : bool =field(
default=UpperCamelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase__ : Optional[int] =field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
UpperCamelCase__ : List[str] =list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class A :
UpperCamelCase__ : WavaVecaProcessor
UpperCamelCase__ : Union[bool, str] =True
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Optional[int] =None
def __call__( self : str , lowercase_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_lowerCamelCase : List[Any] =[{'input_values': feature['input_values']} for feature in features]
_lowerCamelCase : str =[{'input_ids': feature['labels']} for feature in features]
_lowerCamelCase : Union[str, Any] =self.processor.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
_lowerCamelCase : Any =self.processor.pad(
labels=lowercase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
_lowerCamelCase : str =labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_lowerCamelCase : List[Any] =labels
return batch
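# The label-masking trick used in the collator above, in isolation: padded
# label positions (attention_mask == 0) are replaced with -100 so the CTC
# loss ignores them (tensors below are illustrative):
import torch

labels = torch.tensor([[5, 9, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
print(labels.masked_fill(attention_mask.ne(1), -100))
# -> tensor([[   5,    9,    2, -100, -100]])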
class A ( UpperCamelCase_ ):
def lowerCamelCase ( self : Optional[Any] , lowercase_ : nn.Module , lowercase_ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
"""simple docstring"""
model.train()
_lowerCamelCase : List[Any] =self._prepare_inputs(lowercase_ )
if self.use_amp:
with autocast():
_lowerCamelCase : List[Any] =self.compute_loss(lowercase_ , lowercase_ )
else:
_lowerCamelCase : Optional[int] =self.compute_loss(lowercase_ , lowercase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCamelCase : Optional[Any] =loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCamelCase : int =loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
_lowerCamelCase : Dict =loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase_ ).backward()
elif self.use_apex:
with amp.scale_loss(lowercase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase_ )
else:
loss.backward()
return loss.detach()
def a_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any =parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_lowerCamelCase : Dict =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : List[str] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_lowerCamelCase : str =datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
_lowerCamelCase : int =datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
_lowerCamelCase : Dict =F'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(SCREAMING_SNAKE_CASE__ : Tuple ):
_lowerCamelCase : Optional[Any] =re.sub(SCREAMING_SNAKE_CASE__ , '' , batch['sentence'] ).lower() + ' '
return batch
_lowerCamelCase : int =train_dataset.map(SCREAMING_SNAKE_CASE__ , remove_columns=['sentence'] )
_lowerCamelCase : Tuple =eval_dataset.map(SCREAMING_SNAKE_CASE__ , remove_columns=['sentence'] )
def extract_all_chars(SCREAMING_SNAKE_CASE__ : Tuple ):
_lowerCamelCase : int =' '.join(batch['text'] )
_lowerCamelCase : Union[str, Any] =list(set(SCREAMING_SNAKE_CASE__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_lowerCamelCase : Tuple =train_dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , )
_lowerCamelCase : Union[str, Any] =train_dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , )
_lowerCamelCase : Optional[int] =list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
_lowerCamelCase : Tuple ={v: k for k, v in enumerate(SCREAMING_SNAKE_CASE__ )}
_lowerCamelCase : int =vocab_dict[' ']
del vocab_dict[" "]
_lowerCamelCase : Optional[Any] =len(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =len(SCREAMING_SNAKE_CASE__ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : int =WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
_lowerCamelCase : Optional[int] =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Optional[int] =WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Any =WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_lowerCamelCase : List[str] =min(len(SCREAMING_SNAKE_CASE__ ) , data_args.max_train_samples )
_lowerCamelCase : Any =train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
if data_args.max_val_samples is not None:
_lowerCamelCase : Optional[int] =eval_dataset.select(range(data_args.max_val_samples ) )
_lowerCamelCase : str =torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(SCREAMING_SNAKE_CASE__ : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple =torchaudio.load(batch['path'] )
_lowerCamelCase : Optional[int] =resampler(SCREAMING_SNAKE_CASE__ ).squeeze().numpy()
_lowerCamelCase : Optional[Any] =16_000
_lowerCamelCase : Tuple =batch['text']
return batch
_lowerCamelCase : Any =train_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_lowerCamelCase : Optional[int] =eval_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_lowerCamelCase : int =processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(SCREAMING_SNAKE_CASE__ )
return batch
_lowerCamelCase : str =train_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , )
_lowerCamelCase : Tuple =eval_dataset.map(
SCREAMING_SNAKE_CASE__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_lowerCamelCase : Union[str, Any] =datasets.load_metric('wer' )
def compute_metrics(SCREAMING_SNAKE_CASE__ : str ):
_lowerCamelCase : Union[str, Any] =pred.predictions
_lowerCamelCase : Optional[int] =np.argmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
_lowerCamelCase : Any =processor.tokenizer.pad_token_id
_lowerCamelCase : Any =processor.batch_decode(SCREAMING_SNAKE_CASE__ )
# we do not want to group tokens when computing the metrics
_lowerCamelCase : List[Any] =processor.batch_decode(pred.label_ids , group_tokens=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Any =wer_metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_lowerCamelCase : Optional[int] =DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
# Initialize our Trainer
_lowerCamelCase : Optional[Any] =CTCTrainer(
model=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , compute_metrics=SCREAMING_SNAKE_CASE__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_lowerCamelCase : Optional[Any] =last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_lowerCamelCase : Dict =model_args.model_name_or_path
else:
_lowerCamelCase : int =None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_lowerCamelCase : Optional[int] =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
_lowerCamelCase : Any =train_result.metrics
_lowerCamelCase : Optional[int] =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
_lowerCamelCase : List[Any] =min(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('train' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('train' , SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
_lowerCamelCase : str ={}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCamelCase : Tuple =trainer.evaluate()
_lowerCamelCase : List[Any] =data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Union[str, Any] =min(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ )
return results
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , ):
output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
else:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
    UpperCAmelCase_ = torch.float16 if fpaa else torch.float32  # fp16 when requested, fp32 otherwise
if fpaa and torch.cuda.is_available():
UpperCAmelCase_ = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = Path(lowerCAmelCase__ )
# VAE DECODER
UpperCAmelCase_ = AutoencoderKL.from_pretrained(model_path + "/vae" )
UpperCAmelCase_ = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCAmelCase_ = vae_decoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , 25 , 25 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase__ , )
del vae_decoder
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
lowerCamelCase = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''roberta'''
def __init__( self : int , _UpperCAmelCase : List[Any]=50265 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Optional[Any]=1e-12 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=None , **_UpperCAmelCase : List[str] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
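# For reference, a config equivalent to the defaults above can be built from
# the public `transformers` API (assumed available):
from transformers import RobertaConfig

config = RobertaConfig(vocab_size=50265, hidden_size=768, num_hidden_layers=12)
print(config.model_type)  # roberta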
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0.2 , __magic_name__=0.2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = bp_numa
snake_case_ : Union[str, Any] = bp_numa
snake_case_ : Dict = bp_numa
snake_case_ : Optional[Any] = conva_get[:2]
snake_case_ : int = conva_get[2]
snake_case_ : Optional[int] = size_pa
snake_case_ : int = rate_w
snake_case_ : int = rate_t
snake_case_ : Optional[int] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
snake_case_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case_ : int = -2 * np.random.rand(self.conva[1] ) + 1
snake_case_ : Any = -2 * np.random.rand(self.num_bpa ) + 1
snake_case_ : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(__magic_name__ , '''wb''' ) as f:
pickle.dump(__magic_name__ , __magic_name__ )
print(F'''Model saved: {save_path}''' )
@classmethod
def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
with open(__magic_name__ , '''rb''' ) as f:
snake_case_ : str = pickle.load(__magic_name__ ) # noqa: S301
snake_case_ : List[Any] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
snake_case_ : str = model_dic.get('''size_pooling1''' )
snake_case_ : Any = model_dic.get('''num_bp1''' )
snake_case_ : Tuple = model_dic.get('''num_bp2''' )
snake_case_ : str = model_dic.get('''num_bp3''' )
snake_case_ : List[str] = model_dic.get('''rate_weight''' )
snake_case_ : Dict = model_dic.get('''rate_thre''' )
# create model instance
snake_case_ : Optional[Any] = CNN(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# modify model parameter
snake_case_ : Union[str, Any] = model_dic.get('''w_conv1''' )
snake_case_ : Union[str, Any] = model_dic.get('''wkj''' )
snake_case_ : int = model_dic.get('''vji''' )
snake_case_ : List[Any] = model_dic.get('''thre_conv1''' )
snake_case_ : Optional[Any] = model_dic.get('''thre_bp2''' )
snake_case_ : Optional[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowerCamelCase (self , __magic_name__ ) -> str:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
return round(__magic_name__ , 3 )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = convs[0]
snake_case_ : Dict = convs[1]
snake_case_ : Optional[int] = np.shape(__magic_name__ )[0]
        # get the data slices of the original image data (data_focus)
snake_case_ : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
for j_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
snake_case_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__magic_name__ )
        # calculate the feature map of every single kernel, and save it as a list of matrices
snake_case_ : Optional[int] = []
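        # output feature-map side length: (input_size - kernel_size) / stride + 1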
snake_case_ : List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__magic_name__ ):
snake_case_ : List[str] = []
for i_focus in range(len(__magic_name__ ) ):
snake_case_ : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__magic_name__ ) )
snake_case_ : Any = np.asmatrix(__magic_name__ ).reshape(
__magic_name__ , __magic_name__ )
data_featuremap.append(__magic_name__ )
        # expanding the data slice to one dimension
snake_case_ : Optional[int] = []
for each_focus in data_focus:
            focus_list.extend(self.Expand_Mat(__magic_name__ ) )
snake_case_ : List[str] = np.asarray(__magic_name__ )
return focus_list, data_featuremap
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__="average_pool" ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = len(featuremaps[0] )
snake_case_ : Dict = int(size_map / size_pooling )
snake_case_ : Dict = []
for i_map in range(len(__magic_name__ ) ):
snake_case_ : List[str] = featuremaps[i_map]
snake_case_ : List[Any] = []
for i_focus in range(0 , __magic_name__ , __magic_name__ ):
for j_focus in range(0 , __magic_name__ , __magic_name__ ):
snake_case_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__magic_name__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__magic_name__ ) )
snake_case_ : Any = np.asmatrix(__magic_name__ ).reshape(__magic_name__ , __magic_name__ )
featuremap_pooled.append(__magic_name__ )
return featuremap_pooled
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : int = []
for i in range(len(__magic_name__ ) ):
snake_case_ : Optional[int] = np.shape(data[i] )
snake_case_ : int = data[i].reshape(1 , shapes[0] * shapes[1] )
snake_case_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(__magic_name__ )
snake_case_ : Dict = np.asarray(__magic_name__ )
return data_expanded
def lowerCamelCase (self , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = np.asarray(__magic_name__ )
snake_case_ : Optional[int] = np.shape(__magic_name__ )
snake_case_ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : Dict = 0
for i_map in range(__magic_name__ ):
snake_case_ : List[Any] = np.ones((size_map, size_map) )
for i in range(0 , __magic_name__ , __magic_name__ ):
for j in range(0 , __magic_name__ , __magic_name__ ):
                    snake_case_ : Optional[int] = pd_pool[i_pool]
snake_case_ : int = i_pool + 1
snake_case_ : Optional[int] = np.multiply(
__magic_name__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__magic_name__ )
return pd_all
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=bool ) -> Optional[int]:
'''simple docstring'''
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(__magic_name__ )) )
print((''' - - Shape: Teach_Data ''', np.shape(__magic_name__ )) )
snake_case_ : int = 0
snake_case_ : Union[str, Any] = []
snake_case_ : List[str] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
snake_case_ : Dict = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__magic_name__ ) ):
# print('------------Learning Image: %d--------------'%p)
snake_case_ : Optional[int] = np.asmatrix(datas_train[p] )
snake_case_ : List[str] = np.asarray(datas_teach[p] )
snake_case_ , snake_case_ : Any = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case_ : List[str] = self.pooling(__magic_name__ , self.size_poolinga )
snake_case_ : str = np.shape(__magic_name__ )
snake_case_ : List[str] = self._expand(__magic_name__ )
snake_case_ : Optional[int] = data_bp_input
snake_case_ : Tuple = np.dot(__magic_name__ , self.vji.T ) - self.thre_bpa
snake_case_ : List[Any] = self.sig(__magic_name__ )
snake_case_ : Union[str, Any] = np.dot(__magic_name__ , self.wkj.T ) - self.thre_bpa
snake_case_ : Tuple = self.sig(__magic_name__ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
snake_case_ : str = np.multiply(
(data_teach - bp_outa) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
snake_case_ : Union[str, Any] = np.multiply(
np.dot(__magic_name__ , self.wkj ) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
snake_case_ : Tuple = np.dot(__magic_name__ , self.vji )
snake_case_ : List[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
snake_case_ : List[Any] = pd_conva_pooled.T.getA().tolist()
snake_case_ : str = self._calculate_gradient_from_pool(
__magic_name__ , __magic_name__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
snake_case_ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
snake_case_ : Union[str, Any] = self.rate_weight * np.dot(__magic_name__ , __magic_name__ )
snake_case_ : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
snake_case_ : List[str] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
snake_case_ : Dict = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
snake_case_ : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
snake_case_ : Any = self.thre_bpa - pd_k_all * self.rate_thre
snake_case_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error of this single image
snake_case_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
snake_case_ : Optional[Any] = rp + 1
snake_case_ : Union[str, Any] = error_count / patterns
all_mse.append(__magic_name__ )
def draw_error():
snake_case_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__magic_name__ , '''+-''' )
plt.plot(__magic_name__ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(__magic_name__ , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(__magic_name__ )) )
for p in range(len(__magic_name__ ) ):
snake_case_ : Optional[int] = np.asmatrix(datas_test[p] )
snake_case_ , snake_case_ : int = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case_ : Union[str, Any] = self.pooling(__magic_name__ , self.size_poolinga )
snake_case_ : List[Any] = self._expand(__magic_name__ )
snake_case_ : Optional[Any] = data_bp_input
snake_case_ : int = bp_outa * self.vji.T - self.thre_bpa
snake_case_ : List[Any] = self.sig(__magic_name__ )
snake_case_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
snake_case_ : Tuple = self.sig(__magic_name__ )
produce_out.extend(bp_outa.getA().tolist() )
        snake_case_ : List[str] = [list(map(self.do_round , each ) ) for each in produce_out]
return np.asarray(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = np.asmatrix(__magic_name__ )
snake_case_ , snake_case_ : Union[str, Any] = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case_ : Optional[Any] = self.pooling(__magic_name__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
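# ----------------------------------------------------------------------
# A self-contained numpy sketch of the forward pass that `convolute` and
# `pooling` above implement (the kernel size, stride, threshold and pooling
# values below are illustrative, not the class defaults):
def _demo_forward() -> None:
    rng = np.random.default_rng(0)
    data = rng.random((8, 8))
    kernel = rng.random((3, 3)) - 0.5
    step, threshold, size_pooling = 1, 0.1, 2
    size_k = kernel.shape[0]
    n_out = (data.shape[0] - size_k) // step + 1
    # valid convolution with stride `step`, scalar bias, sigmoid activation
    feature = np.zeros((n_out, n_out))
    for i in range(n_out):
        for j in range(n_out):
            patch = data[i * step : i * step + size_k, j * step : j * step + size_k]
            feature[i, j] = 1 / (1 + np.exp(-(np.sum(patch * kernel) - threshold)))
    # average pooling via a reshape trick: group into pooling blocks, then mean
    m = n_out // size_pooling
    pooled = feature[: m * size_pooling, : m * size_pooling].reshape(
        m, size_pooling, m, size_pooling
    ).mean(axis=(1, 3))
    print(feature.shape, pooled.shape)  # (6, 6) (3, 3)


if __name__ == "__main__":
    _demo_forward()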
| 279
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = SpeechTaTokenizer
lowerCamelCase_ : int = False
lowerCamelCase_ : Dict = True
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ )
snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ )
snake_case_ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = '''this is a test'''
snake_case_ : int = '''this is a test'''
return input_text, output_text
def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ )
snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
return text, ids
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = '''<pad>'''
snake_case_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(__magic_name__ ) , 81 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ : int = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) )
snake_case_ : Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : Dict = len(__magic_name__ )
self.assertNotEqual(__magic_name__ , 0 )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , len(__magic_name__ ) )
self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) )
snake_case_ : Tuple = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ )
self.assertGreaterEqual(len(__magic_name__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ )
# fmt: off
self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
snake_case_ : List[Any] = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
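# ----------------------------------------------------------------------
# Usage sketch of the tokenizer exercised above, with the real transformers
# class name and the checkpoint from the integration test (sentencepiece and
# network access are assumed):
#
#     from transformers import SpeechT5Tokenizer
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tok("this is a test").input_ids
#     print(tok.convert_ids_to_tokens(ids))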
| 279
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = '''convbert'''
def __init__( self : Optional[int] , lowerCAmelCase__ : str=3_0_5_2_2 , lowerCAmelCase__ : str=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Tuple=1e-12 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : int=7_6_8 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : Union[str, Any]=9 , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : List[str] = type_vocab_size
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Any = embedding_size
_UpperCAmelCase : Tuple = head_ratio
_UpperCAmelCase : int = conv_kernel_size
_UpperCAmelCase : List[str] = num_groups
_UpperCAmelCase : Any = classifier_dropout
class A__ ( UpperCamelCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
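# A minimal sketch: build a small ConvBERT model from this configuration
# (`ConvBertConfig`/`ConvBertModel` are the real transformers names; the
# reduced sizes below are illustrative, not a released checkpoint):
if __name__ == "__main__":
    from transformers import ConvBertConfig, ConvBertModel

    cfg = ConvBertConfig(
        hidden_size=256,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=512,
        conv_kernel_size=9,
        head_ratio=2,
    )
    model = ConvBertModel(cfg)
    print(sum(p.numel() for p in model.parameters()))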
| 17
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['LayoutLMv2FeatureExtractor']
__a = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
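# Usage sketch: with the _LazyModule wiring above, the package imports cheaply
# and the heavy submodules only load on first attribute access, e.g.
#
#     from transformers import LayoutLMv2Config  # resolved lazily
#
#     print(LayoutLMv2Config().model_type)  # "layoutlmv2"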
| 17
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_a : List[str] = logging.get_logger(__name__)
class __A ( __UpperCAmelCase ):
_UpperCamelCase : Optional[Any] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = True , a__ = None , a__ = True , a__ = 1 / 255 , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCAmelCase : List[Any] = size if size is not None else {"""shortest_edge""": 256}
_lowerCAmelCase : int = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name="""crop_size""" )
_lowerCAmelCase : Dict = do_resize
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : Tuple = resample
_lowerCAmelCase : Optional[Any] = do_center_crop
_lowerCAmelCase : str = crop_size
_lowerCAmelCase : Tuple = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self , a__ , a__ , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
_lowerCAmelCase : int = get_resize_output_image_size(lowerCamelCase__ , size=size["""shortest_edge"""] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : List[Any] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowerCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __A ( self , a__ , a__ , a__ = None , **a__ ):
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Any = size if size is not None else self.size
_lowerCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ , param_name="""crop_size""" )
_lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Tuple = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : int = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_lowerCAmelCase : List[Any] = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Union[str, Any] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
_lowerCAmelCase : Optional[int] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_lowerCAmelCase : List[Any] = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_lowerCAmelCase : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
_lowerCAmelCase : Union[str, Any] = target_sizes.numpy()
_lowerCAmelCase : int = []
for idx in range(len(lowerCamelCase__ ) ):
_lowerCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCamelCase__ )
_lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
_lowerCAmelCase : Optional[int] = logits.argmax(dim=1 )
_lowerCAmelCase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
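# A self-contained sketch of the "shortest_edge" resize rule applied in
# `resize` above (a simplified stand-in for get_resize_output_image_size):
def _shortest_edge_size(height: int, width: int, shortest_edge: int = 256) -> tuple:
    # scale so the smaller side hits the target while keeping the aspect ratio
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


if __name__ == "__main__":
    print(_shortest_edge_size(480, 640))  # (256, 341)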
| 44
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
        UpperCAmelCase__ = 2**8 # utf-8 code units are 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
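# Usage sketch (no vocab file is needed; per the methods above each utf-8 byte
# maps to byte_value + 3 after the pad/eos/unk specials, and </s> has id 1):
if __name__ == "__main__":
    from transformers import ByT5Tokenizer

    tok = ByT5Tokenizer()
    print(tok("hi").input_ids)  # [107, 108, 1]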
| 98
| 0
|
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( snake_case__ : np.ndarray , snake_case__ : tuple[int, int] , snake_case__ : tuple[int, int] , snake_case__ : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
UpperCamelCase , UpperCamelCase : List[Any] = grid.shape
UpperCamelCase : Tuple = [-1, 1, 0, 0]
UpperCamelCase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
UpperCamelCase , UpperCamelCase : List[str] = [(0, source)], set()
UpperCamelCase : Optional[Any] = np.full((rows, cols) , np.inf )
UpperCamelCase : Union[str, Any] = 0
UpperCamelCase : Optional[Any] = np.empty((rows, cols) , dtype=snake_case__ )
UpperCamelCase : List[str] = None
while queue:
((UpperCamelCase) , (UpperCamelCase)) : Optional[int] = heappop(snake_case__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
UpperCamelCase : str = []
while (x, y) != source:
path.append((x, y) )
UpperCamelCase , UpperCamelCase : Any = predecessors[x, y]
path.append(snake_case__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(snake_case__ ) ):
UpperCamelCase , UpperCamelCase : int = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
UpperCamelCase : str = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(snake_case__ , (dist + 1, (nx, ny)) )
UpperCamelCase : Optional[int] = dist + 1
UpperCamelCase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
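# Quick demo on a toy grid (1 = walkable, 0 = wall). A simplified,
# self-contained variant of the same uniform-cost search, since the mangled
# local names above keep the function from running as printed:
def _demo_grid_search() -> None:
    grid = [[1, 1, 1], [0, 0, 1], [1, 1, 1]]
    queue, seen = [(0, (0, 0))], set()
    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) in seen:
            continue
        seen.add((x, y))
        if (x, y) == (2, 0):
            print(dist)  # 6: around the wall via the right-hand column
            return
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < 3 and 0 <= ny < 3 and grid[nx][ny] == 1:
                heappush(queue, (dist + 1, (nx, ny)))


if __name__ == "__main__":
    _demo_grid_search()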
| 103
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = "data2vec-text"
def __init__( self, SCREAMING_SNAKE_CASE_=3_0522, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = vocab_size
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : int = num_hidden_layers
UpperCamelCase : Dict = num_attention_heads
UpperCamelCase : str = hidden_act
UpperCamelCase : List[str] = intermediate_size
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : List[str] = type_vocab_size
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : List[str] = layer_norm_eps
UpperCamelCase : List[str] = position_embedding_type
UpperCamelCase : Any = use_cache
UpperCamelCase : Any = classifier_dropout
class lowerCAmelCase_ ( a__ ):
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
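# A minimal sketch (`Data2VecTextConfig` is the real transformers name; the
# reduced sizes are illustrative):
if __name__ == "__main__":
    from transformers import Data2VecTextConfig

    cfg = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(cfg.model_type)  # data2vec-text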
| 103
| 1
|
import functools
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
# Validation
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(lowerCamelCase_ ) != 3 or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(lowerCamelCase_ ) == 0:
return 0
if min(lowerCamelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(lowerCamelCase_ ) >= 366:
raise ValueError('All days elements should be less than 366' )
_lowercase : Optional[int] = set(lowerCamelCase_ )
@functools.cache
def dynamic_programming(lowerCamelCase_ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
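# Worked example (the classic "minimum cost for tickets" instance): with
# passes costing [2, 7, 15] for 1/7/30 days and travel days
# [1, 4, 6, 7, 8, 20], a 7-day pass covering days 1-7 plus single-day
# tickets on days 8 and 20 is optimal: 7 + 2 + 2 = 11. Below, a
# self-contained version of the same memoised recurrence (the duplicated
# parameter names above keep that definition from compiling as printed):
def _min_cost_tickets(days: list, costs: list) -> int:
    day_set = set(days)

    @functools.cache
    def solve(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return solve(day + 1)
        return min(
            costs[0] + solve(day + 1),
            costs[1] + solve(day + 7),
            costs[2] + solve(day + 30),
        )

    return solve(1)


if __name__ == "__main__":
    print(_min_cost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11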
| 21
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = 1
_lowercase : Any = 3
_lowercase : Tuple = (32, 32)
_lowercase : Tuple = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCamelCase)
return image
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
return model
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
return model
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=50_06, )
return RobertaSeriesModelWithTransformation(lowerCamelCase)
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
def extract(*lowerCamelCase, **lowerCamelCase):
class _lowerCamelCase:
def __init__( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = torch.ones([0])
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
self.pixel_values.to(lowerCamelCase)
return self
return Out()
return extract
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : Optional[Any] = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Tuple = 77
_lowercase : int = self.dummy_image.to(lowerCamelCase)
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Optional[int] = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : Dict = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Any = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, )
_lowercase : Optional[int] = output.images
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Optional[Any] = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, return_dict=lowerCamelCase, )[0]
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
_lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : str = self.dummy_vae
_lowercase : Optional[Any] = self.dummy_text_encoder
_lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Optional[Any] = 77
_lowercase : str = self.dummy_image.to(lowerCamelCase)
# put models in fp16
_lowercase : List[str] = unet.half()
_lowercase : List[Any] = vae.half()
_lowercase : Any = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Any = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : int = 'A painting of a squirrel eating a burger'
_lowercase : Optional[Any] = torch.manual_seed(0)
_lowercase : Union[str, Any] = alt_pipe(
[prompt], generator=lowerCamelCase, num_inference_steps=2, output_type='np', image=lowerCamelCase, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : str = init_image.resize((7_60, 5_04))
_lowercase : Optional[int] = 'BAAI/AltDiffusion'
_lowercase : str = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : List[str] = 'A fantasy landscape, trending on artstation'
_lowercase : Any = torch.manual_seed(0)
_lowercase : Dict = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : List[str] = output.images[0]
_lowercase : Tuple = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowercase : Optional[Any] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : str = init_image.resize((7_68, 5_12))
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
_lowercase : str = 'BAAI/AltDiffusion'
_lowercase : Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
pipe.enable_attention_slicing()
_lowercase : int = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : int = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, strength=0.7_5, guidance_scale=7.5, generator=lowerCamelCase, output_type='np', )
_lowercase : Union[str, Any] = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
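# ----------------------------------------------------------------------
# Usage sketch of the pipeline exercised above, with the real diffusers class
# name (class names in this file write `a` where the original has the digit 2);
# a CUDA GPU and the BAAI/AltDiffusion weights are assumed:
#
#     import torch
#     from diffusers import AltDiffusionImg2ImgPipeline
#     from diffusers.utils import load_image
#
#     pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
#         "BAAI/AltDiffusion", torch_dtype=torch.float16
#     ).to("cuda")
#     init = load_image(
#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
#         "/resolve/main/img2img/sketch-mountains-input.jpg"
#     ).resize((768, 512))
#     out = pipe(
#         "A fantasy landscape, trending on artstation",
#         image=init, strength=0.75, guidance_scale=7.5,
#     )
#     out.images[0].save("fantasy_landscape.png")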
| 21
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase_ =logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ ="""
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
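# A minimal usage sketch (values chosen for illustration): with the default
# scale_factor of 8, pixel dimensions are mapped to latent dimensions rounded
# up to the next whole latent block, e.g.:
#   downscale_height_and_width(512, 512)  ->  (64, 64)   # 512 // 8**2 == 8, times 8
#   downscale_height_and_width(500, 500)  ->  (64, 64)   # 500 % 64 != 0, so rounded up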
class KandinskyV22ControlnetPipeline( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        '''simple docstring'''
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
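# A minimal standalone sketch (hypothetical tensors, independent of the pipeline
# above) of the classifier-free guidance step performed inside `__call__`: the
# unconditional noise prediction is extrapolated toward the text-conditioned one.
if __name__ == "__main__":
    demo_uncond = torch.randn(1, 4, 64, 64)
    demo_text = torch.randn(1, 4, 64, 64)
    demo_scale = 4.0
    # same formula as above: uncond + guidance_scale * (text - uncond)
    demo_guided = demo_uncond + demo_scale * (demo_text - demo_uncond)
    print(demo_guided.shape)  # torch.Size([1, 4, 64, 64])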
| 128
|
"""simple docstring"""
def net_present_value(discount_rate, cash_flows):
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
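# Worked example (standard NPV formula: sum of cash_flow_i / (1 + rate)**i):
#   net_present_value(0.1, [-1000, 500, 500, 500])
#   = -1000 + 500/1.1 + 500/1.21 + 500/1.331
#   = 243.43 after rounding to 2 digits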
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''git_vision_model'''
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs) ->"PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''') == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict , **kwargs)
class GitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''git'''
    def __init__( self , vision_config=None , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_024 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
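# A minimal usage sketch (hypothetical run, assuming the config classes above
# are importable) of the nested-config serialization pattern implemented by
# `to_dict`: the nested `GitVisionConfig` object is replaced by its own dict so
# the result is plain data that can be dumped to JSON.
if __name__ == "__main__":
    import json

    demo_config = GitConfig()
    demo_dict = demo_config.to_dict()
    assert isinstance(demo_dict["vision_config"], dict)
    print(json.dumps(demo_dict)[:80])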
| 14
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs) ->None:
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 14
| 1
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_UpperCamelCase = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device( module , tensor_name , device , value=None , fp16_statistics=None ):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split(""".""" )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to("""cpu""" )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
                        """0.37.2""" )
                    if not is_8bit_serializable:
                        raise ValueError(
                            """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
                            """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
            else:
                new_value = torch.tensor(value , device="""cpu""" )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , """SCB""" , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
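# A minimal standalone sketch (hypothetical toy module, independent of the
# helper above) of the dotted-name traversal at the top of
# `set_module_quantized_tensor_to_device`: a name such as "0.weight" is
# resolved one attribute at a time with getattr.
if __name__ == "__main__":
    import torch.nn as demo_nn

    demo_model = demo_nn.Sequential(demo_nn.Linear(4 , 4 ) )
    demo_splits = "0.weight".split("." )
    demo_module = demo_model
    for demo_split in demo_splits[:-1]:
        demo_module = getattr(demo_module , demo_split )
    demo_param = getattr(demo_module , demo_splits[-1] )
    print(demo_param.shape )  # torch.Size([4, 4])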
def _replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in """.""".join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
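# A minimal standalone sketch (hypothetical modules, swapping in a plain
# nn.Linear rather than a bitsandbytes layer) of the recursive child-replacement
# pattern used by `_replace_with_bnb_linear`: walk named children, replace
# matching leaves in `_modules`, and recurse into containers.
if __name__ == "__main__":
    import torch.nn as demo_nn

    def demo_swap_linears(model):
        for name, module in model.named_children():
            if isinstance(module, demo_nn.Linear):
                # stand-in for the bnb.nn.Linear8bitLt construction above
                model._modules[name] = demo_nn.Linear(
                    module.in_features, module.out_features, bias=module.bias is not None)
            if len(list(module.children())) > 0:
                demo_swap_linears(module)
        return model

    print(demo_swap_linears(demo_nn.Sequential(demo_nn.Linear(2, 2), demo_nn.ReLU())))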
def replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    """simple docstring"""
    modules_to_not_convert = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def replace_8bit_linear( *args , **kwargs ):
    """simple docstring"""
    warnings.warn(
        """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device( *args , **kwargs ):
    """simple docstring"""
    warnings.warn(
        """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert( model ):
    """simple docstring"""
    tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
| 16
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_UpperCamelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    """simple docstring"""
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
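# For example (assuming `exact_match_score` returns 1.0 only on an exact match):
#   metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Paris"])
# scores the prediction against every reference answer and keeps the best
# score, so matching any single ground truth is enough.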
def get_scores( args , preds_path , gold_data_path ):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="""\t""" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f'F1: {fa:.2f}' )
    logger.info(f'EM: {em:.2f}' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path , """r""" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , """r""" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("""\t""" )[:k] )
        ref_provenance = set(reference.split("""\t""" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}' )
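# Worked example for k=2: if a hypothesis line is "docA\tdocB\tdocC" and the
# reference line is "docB\tdocD", the top-k set is {docA, docB}, the overlap
# with {docB, docD} is {docB}, and the sample contributes 1/2 to the average.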
def evaluate_batch_retrieval( args , rag_model , questions ):
    """simple docstring"""
    def strip_title(title ):
        if title.startswith("""\"""" ):
            title = title[1:]
        if title.endswith("""\"""" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="""pt""" , padding=True , truncation=True , )["""input_ids"""].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="""pt""" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("""Q: {} - A: {}""".format(q , a ) )
        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    return args
def main( args ):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("""rag""" ):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("""Evaluate the following checkpoints: %s""" , checkpoints )
    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == """e2e""" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("""***** Running evaluation for {} *****""".format(checkpoint ) )
        logger.info(""" Batch size = %d""" , args.eval_batch_size )
        logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
        if args.model_type.startswith("""rag""" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("""\n""".join(answers ) + """\n""" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("""\n""".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
| 16
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_0( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 135
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray , size: int , stride: int ) -> np.ndarray:
    '''simple docstring'''
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
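# Worked example: maxpooling on a 4x4 matrix with size=2 and stride=2 reduces
# each non-overlapping 2x2 block to its maximum:
#   [[ 1,  2,  3,  4],
#    [ 5,  6,  7,  8],         [[ 6.,  8.],
#    [ 9, 10, 11, 12],   -->    [14., 16.]]
#    [13, 14, 15, 16]]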
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 135
| 1
|
def least_divisible_repunit(divisor: int ) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
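# Worked example: for divisor = 7 the repunits modulo 7 are
# R(1)=1, R(2)=4, R(3)=6, R(4)=5, R(5)=2, R(6)=0, so the function returns 6
# (111111 = 7 * 15873 is the smallest repunit divisible by 7).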
def solution(limit: int = 1_000_000 ) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"{solution() = }")
| 252
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 252
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase :Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> None:
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1_024, 2_048, 4_096]
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
    def prepare_image_processor_dict( self ) -> dict:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        img_url = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' ,)
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PixaStructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_convert_rgb''' ) )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : int = self.image_processor_tester.prepare_dummy_image()
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
_UpperCAmelCase : Any = 2_0_4_8
_UpperCAmelCase : Optional[Any] = image_processor(A , return_tensors='''pt''' , max_patches=A )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processor
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Dict = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : List[Any] = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processor
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_UpperCAmelCase : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
_UpperCAmelCase : Optional[Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(A ):
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
_UpperCAmelCase : Union[str, Any] = '''Hello'''
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A , header_text=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : int = image_processor(
A , return_tensors='''pt''' , max_patches=A , header_text=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> str:
# Initialize image_processor
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
_UpperCAmelCase : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processor
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_UpperCAmelCase : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processor(
A , return_tensors='''pt''' , max_patches=A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' ,)
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_convert_rgb''' ) )
    def test_call_pil_four_channels( self ) -> None:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # do_convert_rgb drops the extra channel, so expect 3 channels here
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
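# Illustration (not part of the original test file): how expected_hidden_dim above is
# derived. Each flattened patch stores every pixel of one patch across all channels,
# plus a row index and a column index appended by the processor.
patch_height , patch_width , num_channels = 16 , 16 , 3
assert patch_height * patch_width * num_channels + 2 == 770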
| 263
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
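# Illustration (not part of the original file): the _LazyModule pattern above can be
# approximated with PEP 562 module-level __getattr__. A minimal commented sketch, kept
# inert so it does not change this module's behavior:
#
#   import importlib
#
#   _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
#   def __getattr__(name):
#       if name in _attr_to_module:
#           module = importlib.import_module("." + _attr_to_module[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")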
| 263
| 1
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power = 1.0, name = None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step ):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power )
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps ), name=name, )

    def get_config(self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float , num_train_steps: int , num_warmup_steps: int , min_lr_ratio: float = 0.0 , adam_beta1: float = 0.9 , adam_beta2: float = 0.999 , adam_epsilon: float = 1e-8 , adam_clipnorm: Optional[float] = None , adam_global_clipnorm: Optional[float] = None , weight_decay_rate: float = 0.0 , power: float = 1.0 , include_in_weight_decay: Optional[List[str]] = None , ):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
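# Illustration (not part of the original file): typical use of create_optimizer above,
# assuming 10_000 total training steps with a 1_000-step linear warmup. Wrapped in a
# helper so the names below (AdamWeightDecay) resolve at call time, not import time.
def _example_create_optimizer():
    # weight_decay_rate > 0 selects the AdamWeightDecay subclass defined below.
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=10_000,
        num_warmup_steps=1_000,
        weight_decay_rate=0.01,
    )
    # lr_schedule(0) == 0.0; lr_schedule(1_000) ~= 5e-5 before the polynomial decay.
    return optimizer, lr_schedule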
class AdamWeightDecay( Adam ):
    def __init__(self, learning_rate = 0.0_0_1, beta_1 = 0.9, beta_2 = 0.9_9_9, epsilon = 1E-7, amsgrad = False, weight_decay_rate = 0.0, include_in_weight_decay = None, exclude_from_weight_decay = None, name = "AdamWeightDecay", **kwargs, ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config ):
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay, cls ).from_config(config, custom_objects=custom_objects )
    def _prepare_local(self, var_device, var_dtype, apply_state ):
        super(AdamWeightDecay, self )._prepare_local(var_device, var_dtype, apply_state )
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="""adam_weight_decay_rate""" )

    def _decay_weights_op(self, var, learning_rate, apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""], use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay, self ).apply_gradients(zip(grads, tvars ), name=name, **kwargs )
    def _get_lr(self, var_device, var_dtype, apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state )
        decay = self._decay_weights_op(var, lr_t, apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay, self )._resource_apply_dense(grad, var, **kwargs )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None ):
        lr_t , kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state )
        decay = self._decay_weights_op(var, lr_t, apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay, self )._resource_apply_sparse(grad, var, indices, **kwargs )
    def get_config(self ):
        config = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config

    def _do_use_weight_decay(self, param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self ):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64 ), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self ):
        if not self._gradients:
            raise ValueError("""The accumulator should be called first to initialize the gradients""" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(gradients )}' )
        for accum_gradient, gradient in zip(self._gradients, gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )

    def reset(self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
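# Illustration (not part of the original file): a minimal gradient-accumulation loop
# built on the GradientAccumulator above. `model`, `loss_fn`, `optimizer` and `dataset`
# are assumed to be supplied by the caller; none of them are defined in this file.
def train_with_accumulation(model, loss_fn, optimizer, dataset, accumulation_steps=4):
    accumulator = GradientAccumulator()
    for step, (features, labels) in enumerate(dataset):
        with tf.GradientTape() as tape:
            loss = loss_fn(labels, model(features, training=True))
        # Accumulate per-batch gradients instead of applying them immediately.
        accumulator(tape.gradient(loss, model.trainable_variables))
        if (step + 1) % accumulation_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()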
| 82
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
a : Any = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path , enable_fusion=False ):
    """simple docstring"""
    model , model_cfg = create_model(
        """HTSAT-tiny""" , """roberta""" , checkpoint_path , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=enable_fusion , fusion_type="""aff_2d""" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r""".*sequential.(\d+).*"""
    text_projection_pattern = r""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'_projection.{projecton_layer}.' , f'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
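# Illustration (not part of the original file): the qkv split above on a dummy tensor.
# A fused projection has 3 * dim rows; the consecutive thirds become query, key, value.
if __name__ == "__main__":
    _mixed_qkv = torch.arange(12 ).reshape(6 , 2 )  # pretend dim = 2, so 3 * dim = 6 rows
    _qkv_dim = _mixed_qkv.size(0 ) // 3
    _query , _key , _value = _mixed_qkv[:_qkv_dim] , _mixed_qkv[_qkv_dim : 2 * _qkv_dim] , _mixed_qkv[2 * _qkv_dim :]
    assert _query.shape == _key.shape == _value.shape == (2 , 2)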
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """simple docstring"""
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 82
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=1000 ):
    '''simple docstring'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 101
|
def dodecahedron_surface_area(edge ):
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be positive.''' )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge ):
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be positive.''' )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
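# Illustration (not part of the original file): expected values for a unit edge.
# Surface area = 3*sqrt(25 + 10*sqrt(5)), volume = (15 + 7*sqrt(5))/4.
if __name__ == "__main__":
    print(dodecahedron_surface_area(1))  # ~20.645728807067603
    print(dodecahedron_volume(1))  # ~7.663118960624632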
| 101
| 1
|
"""simple docstring"""
import math
def insertion_sort(array: list , start: int = 0 , end: int = 0 ) -> list:
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list , index: int , heap_size: int ) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index] , array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort(array: list ) -> list:
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0] , array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array
def median_of_3(array: list , first_index: int , middle_index: int , last_index: int ) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list , low: int , high: int , pivot: int ) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i] , array[j] = array[j], array[i]
        i += 1
def sort(array: list ) -> list:
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort(array: list , start: int , end: int , size_threshold: int , max_depth: int ) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma : """).strip()
    unsorted = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
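# Illustration (not part of the original file): introsort in brief. The depth bound of
# 2*ceil(log2(n)) forces a heapsort fallback on adversarial inputs, keeping the worst
# case at O(n log n), while short ranges (<= 16 elements) finish with insertion sort.
if __name__ == "__main__":
    sample = [37, 5, 12, 93, 0, -4, 12, 88, 7, 51]
    assert sort(sample[:]) == sorted(sample)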
| 353
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer( Trainer ):
    '''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'{metric_key_prefix}_' ):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics

    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'{metric_key_prefix}_' ):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
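# Illustration (not part of the original file): how such a trainer is typically wired up
# in SQuAD-style example scripts. Every name below (postprocess_qa_predictions, the
# datasets, training_args) is a placeholder, not something defined in this file:
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=postprocess_qa_predictions,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(metric_key_prefix="eval")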
| 168
| 0
|
import math
import sys
def minimum_squares_to_represent_a_number(number ):
    """simple docstring"""
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1, root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
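# Illustration (not part of the original file): by Lagrange's four-square theorem every
# natural number is a sum of at most four perfect squares, so the DP above returns 1-4.
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
    assert minimum_squares_to_represent_a_number(13) == 2  # 4 + 9
    assert minimum_squares_to_represent_a_number(25) == 1  # 5**2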
| 334
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase =logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self ,m: nn.Module ,inputs: Tensor ,outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m ,nn.Conv2d ) or isinstance(m ,nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self ,x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class ModuleTransfer:
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )

    def __call__( self ,x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip ,src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip ,dest_traced ) )
        if len(src_traced ) != len(dest_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced ,src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
def convert_weight_and_push(name, config, save_directory, push_to_hub = True ):
    """simple docstring"""
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ), our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory, model_name = None, push_to_hub = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id )
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
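# Illustration (not part of the original file): a typical invocation, assuming the
# script is saved as convert_resnet_to_pytorch.py (the real filename is not shown here):
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted-resnet50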
| 334
| 1
|
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """simple docstring"""
    def __init__( self ,data: bytes ):
        self.data = data
        self.h = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]

    @staticmethod
    def rotate( n ,b ):
        return ((n << b) | (n >> (3_2 - b))) & 0xff_fff_fff

    def padding( self ):
        padding = b'''\x80''' + b'''\x00''' * (6_3 - (len(self.data ) + 8) % 6_4)
        padded_data = self.data + padding + struct.pack('''>Q''' ,8 * len(self.data ) )
        return padded_data

    def split_blocks( self ):
        return [
            self.padded_data[i : i + 6_4] for i in range(0 ,len(self.padded_data ) ,6_4 )
        ]

    def expand_block( self ,block ):
        w = list(struct.unpack('''>16L''' ,block ) ) + [0] * 6_4
        for i in range(1_6 ,8_0 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) ,1 )
        return w

    def final_hash( self ):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 ,8_0 ):
                if 0 <= i < 2_0:
                    f = (b & c) | ((~b) & d)
                    k = 0x5a_827_999
                elif 2_0 <= i < 4_0:
                    f = b ^ c ^ d
                    k = 0x6e_d9e_ba1
                elif 4_0 <= i < 6_0:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8f_1bb_cdc
                elif 6_0 <= i < 8_0:
                    f = b ^ c ^ d
                    k = 0xca_62c_1d6
                a , b , c , d , e = (
                    self.rotate(a ,5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
                    a,
                    self.rotate(b ,3_0 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xff_fff_fff,
                self.h[1] + b & 0xff_fff_fff,
                self.h[2] + c & 0xff_fff_fff,
                self.h[3] + d & 0xff_fff_fff,
                self.h[4] + e & 0xff_fff_fff,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash():
    """simple docstring"""
    msg = b'''Test String'''
    assert SHA1Hash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324

def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Process some strings or files''' )
    parser.add_argument(
        '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , '''utf-8''' )
    print(SHA1Hash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
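# Illustration (not part of the original file): the pure-Python class above should agree
# with hashlib for arbitrary byte strings.
if __name__ == "__main__":
    for message in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
        assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()  # noqa: S324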
| 52
|
'''simple docstring'''
def stooge_sort(arr ):
    """simple docstring"""
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr , i , h ):
    """simple docstring"""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h] , arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , (h) )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 52
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "convbert"

    def __init__( self, vocab_size=3_0_5_2_2, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=7_6_8, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout

class ConvBertOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
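# Illustration (not part of the original file): building a small custom configuration
# with the class above; unspecified arguments keep the conv-bert-base defaults.
if __name__ == "__main__":
    config = ConvBertConfig(hidden_size=256, num_hidden_layers=4, conv_kernel_size=7)
    print(config.hidden_size, config.conv_kernel_size)  # 256 7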
| 17
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self, parent, vocab_size=1_0_0, batch_size=1_3, image_size=3_0, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels

    def create_and_check_model( self, config, pixel_values, labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self, config, pixel_values, labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )

    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp( self ):
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values, **kwargs ):
                    return model(pixel_values=pixel_values, **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ), len(outputs ) )
                for jitted_output, output in zip(jitted_outputs, outputs ):
                    self.assertEqual(jitted_output.shape, output.shape )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )

def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np" ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_9_6), dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_9_6, 8_1_9_2)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1E-2 ) )

    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array([-1.2_385, -1.0_987, -1.0_108] )
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )

    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_1_8_4_1)
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = np.array([1.6_881, -0.2_787, 0.5_901] )
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1E-4 ) )
        expected_class_idx = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item(), expected_class_idx )
| 17
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class IBertConfig( PretrainedConfig ):
    model_type = '''ibert'''

    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant

class IBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
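# Illustration (not part of the original file): quant_mode switches the model to
# integer-only inference kernels, while force_dequant selectively disables them.
if __name__ == "__main__":
    config = IBertConfig(quant_mode=True, force_dequant="none")
    print(config.quant_mode, config.force_dequant)  # True none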
| 356
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase : int = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word )-> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class PhobertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )

    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
def __a ( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def __a ( self : Dict , snake_case__ : List[str] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def __a ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Tuple = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ):
copyfile(self.merges_file , snake_case__ )
return out_vocab_file, out_merge_file
def __a ( self : List[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
try:
with open(snake_case__ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(snake_case__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
UpperCAmelCase__ : Dict = f.readlines()
for lineTmp in lines:
UpperCAmelCase__ : Optional[int] = lineTmp.strip()
UpperCAmelCase__ : Tuple = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
UpperCAmelCase__ : Any = line[:idx]
UpperCAmelCase__ : str = len(self.encoder )
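
    # Illustrative note (added; the merge ranks below are assumed, not from the
    # original file): with ranks that first pair ("h", "e") and then
    # ("he", "llo</w>"), bpe("hello") returns "he@@ llo" -- the "</w>" marker is
    # stripped and "@@ " flags non-final pieces, which convert_tokens_to_string()
    # removes again when detokenizing.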
| 298
| 0
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 91
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 91
| 1
|
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Return the smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence seen so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element inside tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
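
    # Hedged usage sketch (function name as reconstructed above): the classic
    # example below has longest increasing subsequence 2, 3, 7, 8, 10, 13.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6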
| 195
|
"""simple docstring"""
def binomial_coefficient(n, r):
    """Compute C(n, r) using a single rolling row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
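# Worked check (added): C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.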
| 195
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
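

# Hedged usage sketch (added for illustration; relies only on the class above):
#
#     config = TrOCRConfig(d_model=256, decoder_layers=6)
#     assert config.hidden_size == 256  # resolved through attribute_map
#     assert config.num_hidden_layers == 6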
| 316
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: List[str] = logging.get_logger(__name__)
A__: Union[str, Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
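

# Illustrative note (added): for task="multiple-choice" the exported inputs gain a
# "choice" axis, i.e. OrderedDict([("input_ids", {0: "batch", 1: "choice", 2: "sequence"}), ...]);
# for every other task only "batch" and "sequence" are dynamic.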
| 276
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
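

# Hedged usage sketch (added; `load_tool` and the tool name are illustrative
# assumptions, not from this file): a concrete test case mixes the class in, e.g.
#
#     class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
#         def setUp(self):
#             self.tool = load_tool("translation")
#             self.tool.setup()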
| 116
|
import os
from collections.abc import Iterator
def snake_case( __magic_name__ = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(__magic_name__ ):
lowercase : Tuple = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__magic_name__ )[1] in (".py", ".ipynb"):
yield os.path.join(__magic_name__ , __magic_name__ ).lstrip('''./''' )
def snake_case( __magic_name__ ) -> Dict:
'''simple docstring'''
return F"""{i * ' '}*""" if i else "\n##"
def snake_case( __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Dict = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__magic_name__ ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__magic_name__ )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def snake_case( __magic_name__ = "." ) -> None:
'''simple docstring'''
lowercase : str = ''''''
for filepath in sorted(good_file_paths(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = os.path.split(__magic_name__ )
if filepath != old_path:
lowercase : str = print_path(__magic_name__ , __magic_name__ )
lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase : Optional[Any] = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
lowercase : List[str] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"""{md_prefix(__magic_name__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 116
| 1
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
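

# Hedged usage sketch (added; BitsAndBytesConfig is the standard transformers
# entry point, but this exact wiring is illustrative, not from this file):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     model = replace_with_bnb_linear(
#         model,
#         modules_to_not_convert=get_keys_to_not_convert(model),
#         quantization_config=quantization_config,
#     )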
| 16
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**model_inputs, **text_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
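

# Hedged usage sketch (added; the checkpoint name is an example and the output
# scores are illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]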
| 16
| 1
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
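    # Example invocation (added; the script name and file paths are assumed placeholders):
    # python convert_flava_original_pytorch_to_hf.py \
    #     --checkpoint_path flava.pt --codebook_path codebook.pt \
    #     --pytorch_dump_folder_path ./flava-hf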
| 370
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""YolosFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165
| 0
|
import math
def proth(number: int) -> int:
    """Return the n-th Proth number (3, 5, 9, 13, 17, 25, 33, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in "blocks" whose size doubles with each power of two.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
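

# Worked check (added): the Proth numbers k * 2**n + 1 begin 3, 5, 9, 13, 17, 25, 33,
# so proth(3) == 9 and proth(7) == 33.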
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
    value = 0
try:
        value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 252
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Any = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 2_56,
}
UpperCAmelCase : List[str] = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
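
    # Worked example (added; the id values are placeholders): with cls=[312] and
    # sep=[313], create_token_type_ids_from_sequences([5, 6], [8, 9]) returns
    # [0, 0, 0, 0] + [1, 1, 1] == [0, 0, 0, 0, 1, 1, 1].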
| 252
| 1
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8000,
'sample_size': 13_1072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
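

# Math note (added derivation, not from the source): get_crash_schedule() sets
# sigma = sin(pi*t/2)**2 and alpha = sqrt(1 - sigma**2), then alpha_sigma_to_t()
# maps back via t' = atan2(sigma, alpha) * 2 / pi, warping the linear timestep
# grid onto the "crash" noise schedule expected by the original sampler.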
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(k, list):
            new_state_dict = transform_conv_attns(new_state_dict, k, v)
        else:
            new_state_dict[k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
| 232
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 232
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_lowerCAmelCase : Union[str, Any] = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 169
|
_lowerCAmelCase : Dict = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_lowerCAmelCase : str = ["a", "b", "c", "d", "e"]
def lowerCAmelCase ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = start
# add current to visited
visited.append(_lowerCAmelCase )
UpperCAmelCase__ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCAmelCase__ = topological_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(_lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
UpperCAmelCase__ = topological_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = topological_sort("a", [], [])
print(sort)
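    # Sanity check (added, not part of the original snippet): in this DFS
    # post-order, every vertex must appear *after* the vertices it points to.
    position = {v: i for i, v in enumerate(sort)}
    assert all(position[dst] < position[src] for src in edges for dst in edges[src])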
| 169
| 1
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 354
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` independent trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
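    # Quick numeric check (added, not part of the original snippet): the closed
    # form C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12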
| 256
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_special_tokens_as_you_expect(self):
        pass
| 139
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
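    # Illustrative checks (added, not part of the original snippet):
    # "Uncopyrightable" has no repeated letters, while "letter" repeats two.
    assert is_isogram("Uncopyrightable") is True
    assert is_isogram("letter") is False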
| 139
| 1
|
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
return distance
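# Non-interactive sanity check (added for illustration, not part of the original
# snippet): on a 3-vertex graph, the path 0 -> 1 -> 2 (cost 1 + 2 = 3) beats the
# direct edge 0 -> 2 (cost 10).
def _bellman_ford_demo() -> None:
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 1},
        {"src": 1, "dst": 2, "weight": 2},
        {"src": 0, "dst": 2, "weight": 10},
    ]
    assert bellman_ford(demo_graph, 3, 3, 0) == [0.0, 1.0, 3.0]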
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 368
|
"""simple docstring"""
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
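    # Round-trip check (added, not part of the original snippet): encoding then
    # decoding must return the original message upper-cased.
    assert encrypt("SOS") == "... --- ..."
    assert decrypt(encrypt("SOS")) == "SOS"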
| 268
| 0
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python."""
    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
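    # Deterministic checks (added, not part of the original snippet): the sort
    # must handle an unsorted list and the empty-list base case.
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []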
| 80
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 80
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
    def __len__(self):
        return len(self.lengths)
    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 99
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
| 99
| 1
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
| 248
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 298
| 0
|
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items by highest profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the element as used so .index() never returns it again
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
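    # Worked example (added, not part of the original snippet): the classic
    # instance profits=[60, 100, 120], weights=[10, 20, 30], capacity=50 takes
    # items 1 and 2 whole plus 2/3 of item 3, for 60 + 100 + 80 = 240.0.
    assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0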
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    config = SwinvaConfig()
    name_split = swinva_name.split('''_''')
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-22k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "q_bias" in name:
        name = name.replace('''q_bias''', '''query.bias''')
    if "k_bias" in name:
        name = name.replace('''k_bias''', '''key.bias''')
    if "v_bias" in name:
        name = name.replace('''v_bias''', '''value.bias''')
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''')
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''swinv2.''' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''', '''-''')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')
    timm_outs = timm_model(inputs['''pixel_values'''])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization='''nandwalritik''', commit_message='''Add model''', )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 160
| 0
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among the given env vars, else the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
return value
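# Self-contained demo (added, not part of the original module; the env-var names
# below are hypothetical):
if __name__ == "__main__":
    os.environ["DEMO_FLAG"] = "yes"
    assert parse_flag_from_env("DEMO_FLAG") is True
    assert get_int_from_env(["UNSET_DEMO_INT"], 7) == 7
    assert parse_choice_from_env("UNSET_DEMO_CHOICE") == "no"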
| 357
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 171
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = UniSpeechSatForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = downstream_dict['projector.weight']
__a : Dict = downstream_dict['projector.bias']
__a : int = downstream_dict['model.post_net.linear.weight']
__a : List[str] = downstream_dict['model.post_net.linear.bias']
return model
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ):
__a : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
__a : Tuple = downstream_dict['model.linear.weight']
__a : str = downstream_dict['model.linear.bias']
return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
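    # Example invocation (all paths below are hypothetical placeholders, not from the original script):
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base-plus \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted_model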
| 27
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipForConditionalGeneration",
        "InstructBlipPreTrainedModel",
        "InstructBlipQFormerModel",
        "InstructBlipVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
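# A minimal sketch of the lazy-import pattern used above (my own illustration,
# not the real `transformers._LazyModule`): attribute access imports the
# defining submodule on demand and caches the result.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value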
| 272
| 0
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
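# A small smoke test outside the unittest harness; as the tests above show,
# `evaluate` returns the value of the last assignment or expression.
if __name__ == "__main__":
    state = {"x": 4}
    print(evaluate("y = add_two(x)", {"add_two": add_two}, state=state))  # 6
    print(state)  # {'x': 4, 'y': 6}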
| 359
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_rescale=True,
                 rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None,
                 do_convert_rgb=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None,
                   do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None,
                   data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
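# Usage sketch for the processor above; a synthetic RGB image keeps the example
# self-contained (no files or downloads needed).
if __name__ == "__main__":
    from PIL import Image

    dummy = Image.new("RGB", (640, 480), color=(128, 64, 32))
    batch = BlipImageProcessor().preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)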
| 78
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
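# Minimal demonstration of the helpers above: freeze a tiny module, then report
# the selected device and a timestamp.
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 2)
    freeze_module(layer)
    print(all(not p.requires_grad for p in layer.parameters()))  # True
    print(get_device(), get_timestamp())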
| 62
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
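# A small self-contained check of the lazy mapping above (requires flax to be
# installed; no weights are downloaded): look up the Flax model class that the
# mapping registers for the BERT config.
if __name__ == "__main__":
    from transformers import BertConfig

    print(FLAX_MODEL_MAPPING[BertConfig])  # <class '...modeling_flax_bert.FlaxBertModel'>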
| 301
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
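# Illustrative use of the template above: align the input schema with a dataset
# whose audio column declares an explicit sampling rate.
if __name__ == "__main__":
    template = AutomaticSpeechRecognition()
    features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    aligned = template.align_with_features(features)
    print(aligned.input_schema["audio"].sampling_rate)  # 16000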
| 363
|
import random
def _partition(data, pivot):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
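# Example usage: k-th smallest element via quick_select (expected O(n) runtime).
if __name__ == "__main__":
    data = [7, 2, 9, 4, 1, 5]
    print(quick_select(data, 0))               # 1 (minimum)
    print(quick_select(data, len(data) // 2))  # 5 (upper median of the sorted list)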
| 282
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
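    # Example invocation (paths below are hypothetical placeholders):
    #   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    #       --gpt2_checkpoint_path ./models/117M/model.ckpt \
    #       --pytorch_dump_folder_path ./gpt2-pytorch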
| 217
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
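# Quick self-contained demonstration of `run_command`: capture the Python
# interpreter's version string as decoded stdout.
if __name__ == "__main__":
    print(run_command([sys.executable, "--version"], return_stdout=True))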
| 367
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 146
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self ):
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained(self ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 206
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1E-5 , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_torchscript = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
    def test_config(self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a (self ):
pass
    def test_model_common_attributes(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a (self ):
pass
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization(self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1E-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
        return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        model = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 206
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main( ):
"""simple docstring"""
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 331
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    """simple docstring"""
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , """rb""" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                    raise
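# A minimal usage sketch (not part of the builder above): this builder is what
# `datasets.load_dataset("parquet", ...)` dispatches to, so exercising it only
# needs a parquet file on disk. The file path below is a hypothetical example.
def _example_load_parquet():
    from datasets import load_dataset
    dataset = load_dataset("parquet" , data_files={"train": "data/train.parquet"} )
    return dataset["train"]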
| 331
| 1
|
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
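# A minimal usage sketch for the recursive search above. Note that it probes both
# ends of the list and recurses inward, so it is O(n) rather than a classic
# O(log n) binary search; the sample data below is illustrative only.
def _example_search():
    data = [1, 3, 5, 7, 9]
    assert search(data , 7 ) == 3   # found at index 3
    assert search(data , 4 ) == -1  # not present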
| 338
|
"""simple docstring"""
from torch import nn
def get_activation( act_fn ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
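# A minimal usage sketch: the helper above maps a string name to an nn.Module
# instance, so it can be dropped straight into a Sequential. The layer sizes here
# are arbitrary.
def _example_activation():
    model = nn.Sequential(nn.Linear(8 , 8 ) , get_activation("gelu" ) , nn.Linear(8 , 2 ) )
    return model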
| 268
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( PipelineTool ):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
    def decode( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
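# A minimal usage sketch for the tool above. `PipelineTool.__call__` runs
# encode -> forward -> decode, so a single call classifies the text. Note that
# instantiating and calling the tool downloads the checkpoint on first use; the
# text and labels below are arbitrary examples.
def _example_classify():
    tool = snake_case_()
    return tool("This movie was a lot of fun to watch." , labels=["positive", "negative"] )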
| 333
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( PretrainedConfig ):
    model_type = "vit_mae"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 333
| 1
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["""XLA_PYTHON_CLIENT_MEM_FRACTION"""] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask( shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
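# A small sanity-check sketch for the two helpers above: `ids_tensor` fills the
# requested shape with token ids below `vocab_size`, and the attention mask is
# guaranteed to attend to the last position of every row. Shapes are arbitrary.
def _example_helpers():
    ids = ids_tensor([2, 5] , vocab_size=99 )
    mask = random_attention_mask([2, 5] )
    assert ids.shape == (2, 5)
    assert (mask[:, -1] == 1).all()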
@require_flax
class A__ :
"""simple docstring"""
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self):
        '''simple docstring'''
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers , pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model , flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids , dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
    def test_greedy_generate( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_sample_generate( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_beam_search_generate( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask( self):
        '''simple docstring'''
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def test_validate_generation_inputs( self):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        input_text = 'Hello world'
        input_ids = tokenizer(input_text , return_tensors='np').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , 'do_samples'):
            model.generate(input_ids , do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids , **fake_model_kwargs)
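# A minimal standalone sketch of the jit-compilation pattern the tests above rely
# on: `model.generate` is wrapped with `jax.jit` and must produce the same
# sequences as the eager call. The checkpoint names are the tiny test models
# already used above; anything else here is illustrative.
def _example_jit_generate():
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
    model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
    input_ids = tokenizer('Hello world' , return_tensors='np').input_ids
    eager = model.generate(input_ids).sequences
    jitted = jit(model.generate)(input_ids).sequences
    assert eager.tolist() == jitted.tolist()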
| 99
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='cpu' )['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.' , '.q_proj.' )
            k_name = key.replace('.qkv_proj.' , '.k_proj.' )
            v_name = key.replace('.qkv_proj.' , '.v_proj.' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
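# A small self-contained sketch of the fused-QKV split performed above: a fused
# projection with first dimension `3 * chunk` is cut into three equal chunks
# along dim 0. The tensor sizes are arbitrary example values.
def _example_qkv_split():
    fused = torch.arange(12.0 ).reshape(6 , 2 )   # stand-in for a fused K/V/Q weight, depth 6
    depth = fused.shape[0]
    assert depth % 3 == 0
    k , v , q = torch.split(fused , depth // 3 , dim=0 )
    return k.shape , v.shape , q.shape            # each is (2, 2)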
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
lowercase : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 99
| 1
|
from __future__ import annotations
def median_of_two_arrays( nums_a: list[float] , nums_b: list[float] ) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums_a + nums_b )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_b = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
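# A worked example of the two branches above (illustrative values only):
# sorted([1, 3] + [2]) -> [1, 2, 3], odd length, median 2;
# sorted([1, 3] + [2, 4]) -> [1, 2, 3, 4], even length, median (2 + 3) / 2 = 2.5.
def _example_median():
    assert median_of_two_arrays([1, 3] , [2] ) == 2
    assert median_of_two_arrays([1, 3] , [2, 4] ) == 2.5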
| 216
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encoder_decoder'''] = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_encoder_decoder'''] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_encoder_decoder'''] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint, hf_model, config ):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"], model, config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
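# A hypothetical invocation sketch (the flags exist in the parser above; the
# paths are placeholders, not real files):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan
#
# `stats.npy` is expected to hold the spectrogram mean in row 0 and the scale in
# row 1, matching how the two rows are unpacked in the conversion function above.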
| 236
|
def join( separator, separated ):
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
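# A minimal usage sketch for the helper above (sample strings only). Note that
# the trailing separator added by the loop is removed by the final `.strip()`.
def _example_join():
    assert join("-" , ["a", "b", "c"] ) == "a-b-c"
    assert join(" " , ["You", "are", "amazing!"] ) == "You are amazing!"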
| 236
| 1
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    """simple docstring"""
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
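# Hypothetical launch sketches (the two flags exist in the parser above; the
# script filename is a placeholder for however this file is saved locally):
#
#   python nlp_example.py                          # single CPU or single GPU
#   python nlp_example.py --mixed_precision fp16   # mixed precision
#   accelerate launch nlp_example.py               # distributed, via accelerate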
| 274
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
'''simple docstring'''
@staticmethod
def _UpperCamelCase ( *snake_case_ , **snake_case_ ):
'''simple docstring'''
pass
def hashimage( image: Image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , depth_estimator , examples ):
        '''simple docstring'''
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
            [
                Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                # RGBA
                dataset[0]['file'],
                # LA
                dataset[1]['file'],
                # L
                dataset[2]['file'],
            ] )
        self.assertEqual(
            [
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        '''simple docstring'''
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.6_62 )
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN nor DPT' )
| 274
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig ):
    model_type = "bridgetower_vision_model"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=2_88 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig(PretrainedConfig ):
    model_type = "bridgetower_text_model"
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig(PretrainedConfig ):
    model_type = "bridgetower"
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=7_68 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        """simple docstring"""
        kwargs.pop("""text_config_dict""" , None )
        kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls , text_config: BridgeTowerTextConfig , vision_config: BridgeTowerVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
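# A minimal composition sketch: build the two sub-configs with their defaults
# and combine them via the classmethod above (all values are the defaults).
def _example_bridgetower_config():
    text_config = BridgeTowerTextConfig()
    vision_config = BridgeTowerVisionConfig()
    return BridgeTowerConfig.from_text_vision_configs(text_config , vision_config )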
| 219
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS),
                 pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        return ()
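# Usage sketch (editor's addition): CANINE tokenizes at the codepoint level, so
# converting a token to an id is just ord(), and the special tokens live in the
# Private Use Area constants defined above. With the defaults:
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer.build_inputs_with_special_tokens([ord(c) for c in "hi"])
#   # [CLS] 'h' 'i' [SEP]  ->  [0xE000, 104, 105, 0xE001]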
| 278
| 0
|
import csv
import tweepy
# Twitter API credentials
_lowercase: Dict = ""
_lowercase: Optional[int] = ""
_lowercase: str = ""
_lowercase: List[str] = ""
def a( A : str ) -> None:
"""simple docstring"""
a = tweepy.OAuthHandler(A , A )
auth.set_access_token(A , A )
a = tweepy.API(A )
# initialize a list to hold all the tweepy Tweets
a = []
# make initial request for most recent tweets (200 is the maximum allowed count)
a = api.user_timeline(screen_name=A , count=200 )
# save most recent tweets
alltweets.extend(A )
# save the id of the oldest tweet less one
a = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
a = api.user_timeline(
screen_name=A , count=200 , max_id=A )
# save most recent tweets
alltweets.extend(A )
# update the id of the oldest tweet less one
a = alltweets[-1].id - 1
print(f'''...{len(A )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
a = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , "w" ) as f:
a = csv.writer(A )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(A )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 71
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 71
| 1
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """simple docstring"""
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    """simple docstring"""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
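# Illustration (editor's addition; the key is hypothetical but follows the patterns above):
#   rename_key("module.v.blocks.0.attn.proj.weight")
#   -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"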
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
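# Shape sketch (editor's addition): the fused qkv weight has shape [3 * dim, dim]; the
# slices above peel off query, key and value in that order. Standalone check:
#   qkv = torch.randn(3 * 4, 4)
#   q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]
#   assert q.shape == k.shape == v.shape == (4, 4)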
def remove_keys(state_dict):
    """simple docstring"""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCAmelCase_ : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCAmelCase_ : List[Any] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCAmelCase_ : Dict = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCAmelCase_ : Tuple = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCAmelCase_ : str = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCAmelCase_ : List[Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_a )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(_a )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
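# Example invocation (editor's addition; the script filename is an assumption):
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted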
| 345
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
                 depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5,
                 is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size,
            mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
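# Sanity check of the tester's shape math (editor's addition): with the defaults above
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]), create_and_check_model
# expects a final sequence length of ((32 // 2) ** 2) // 4 ** (3 - 1) = 256 // 16 = 16
# and a final hidden dim of 16 * 2 ** (3 - 1) = 64.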
| 345
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
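# Note (editor's addition): with _LazyModule, `from ... import UniSpeechModel` only
# triggers the heavy torch-backed import the first time the attribute is accessed;
# until then only the names registered in _import_structure exist.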
| 370
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
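# Usage sketch (editor's addition; `MyModel` is hypothetical): decorating a method so
# that, on accelerate >= 0.17.0, an attached `_hf_hook` runs its `pre_forward` callback
# (e.g. to move offloaded weights onto the right device) before the method body executes.
#
#   class MyModel:
#       @apply_forward_hook
#       def encode(self, x):
#           return x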
| 161
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
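# Arithmetic behind the assertions above (editor's addition): GLPNImageProcessor floors
# each spatial dimension to a multiple of size_divisor, so with size_divisor=32 an input
# of height 250 comes out as (250 // 32) * 32 = 224, and 224 % 32 == 0 holds.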
| 159
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """simple docstring"""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
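# Illustration (editor's addition; the URL is hypothetical): how a single remote URL is
# mapped to a path inside the extracted dummy_data folder via urllib.parse.quote_plus.
#   manager.download_and_extract("https://example.com/data/train.csv?rev=2")
#   -> os.path.join(<dummy_file>, "train.csv%3Frev%3D2")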
| 79
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
                 min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6",
                 classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True,
                 num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
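# For reference, a minimal standalone sketch of the same inference path the
# integration test above exercises. The checkpoint id comes from the test; the
# processor/model class names are the standard transformers API, and the image
# path is illustrative -- any RGB image works.
import torch
from PIL import Image

from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001): ImageNet-1k classes + background

print(model.config.id2label[logits.argmax(-1).item()])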
| 170
| 0
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d <= `digit` whose unit fraction contains the longest
    recurring cycle in its decimal expansion (Project Euler problem 26).

    >>> solution(1, 10)
    7
    >>> solution(1, 1000)
    983
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # a remainder repeated: the cycle is complete
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
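# Cross-check (illustrative helper, not part of the original snippet): the
# recurring cycle of 1/d has length equal to the multiplicative order of 10
# modulo d once all factors of 2 and 5 are stripped, which gives an independent
# way to verify the remainder-hunting loop above.
def cycle_length(d: int) -> int:
    while d % 2 == 0:
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0  # the decimal expansion terminates
    order, remainder = 1, 10 % d
    while remainder != 1:
        remainder = remainder * 10 % d
        order += 1
    return order


assert max(range(1, 1000), key=cycle_length) == 983  # agrees with solution(1, 1000)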
| 169
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        # adjacency matrix for edge weights
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the minimum distance known from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow node k as an intermediate stop
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 1 -> 3 -> 4: 5 + 6 = 11
    print(graph.show_min(0, 3))  # 0 -> 2 -> 3: 9 + 7 = 16
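# Quick illustrative check on a toy graph (hypothetical values, not from the
# original): after floyd_warshall(), dp[i][j] holds the cheapest path weight,
# and pairs with no connecting path keep math.inf.
g = Graph(3)
g.add_edge(0, 1, 5)
g.add_edge(1, 2, 2)
g.floyd_warshall()
assert g.show_min(0, 2) == 7  # relaxed through k = 1: 0 -> 1 -> 2
assert g.show_min(2, 0) == math.inf  # no edge ever leaves node 2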
| 169
| 1
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
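# For orientation, a minimal sketch of wiring get_scheduler into a training
# loop. The model and optimizer below are placeholder assumptions, not part of
# the source module.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)

for step in range(100):
    # ... forward pass and loss.backward() would go here ...
    optimizer.step()
    lr_scheduler.step()  # advances the LambdaLR multiplier once per optimizer step
    optimizer.zero_grad()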
| 338
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        # slice one batch of rows out of the underlying Arrow table
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
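# These classes back the public Dataset.from_sql / Dataset.to_sql entry points.
# A minimal round-trip sketch, assuming an in-memory sqlite3 connection is
# accepted as `con` (pandas performs the actual reads and writes under the
# hood); table and column names are illustrative.
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
con = sqlite3.connect(":memory:")

ds.to_sql("my_table", con)  # drives SqlDatasetWriter
roundtrip = Dataset.from_sql("SELECT * FROM my_table", con)  # drives SqlDatasetReader
print(roundtrip.column_names)  # ['id', 'text']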
| 338
| 1
|