code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE__ = np.concatenate(UpperCamelCase_ , axis=0 )
SCREAMING_SNAKE_CASE__ = np.array(UpperCamelCase_ ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = torch.cat(UpperCamelCase_ , dim=0 )
return image
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=0.9_995 ) -> Dict:
'''simple docstring'''
if not isinstance(UpperCamelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = va.device
SCREAMING_SNAKE_CASE__ = va.cpu().numpy()
SCREAMING_SNAKE_CASE__ = va.cpu().numpy()
SCREAMING_SNAKE_CASE__ = np.sum(va * va / (np.linalg.norm(UpperCamelCase_ ) * np.linalg.norm(UpperCamelCase_ )) )
if np.abs(UpperCamelCase_ ) > DOT_THRESHOLD:
SCREAMING_SNAKE_CASE__ = (1 - t) * va + t * va
else:
SCREAMING_SNAKE_CASE__ = np.arccos(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = np.sin(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = theta_a * t
SCREAMING_SNAKE_CASE__ = np.sin(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = np.sin(theta_a - theta_t ) / sin_theta_a
SCREAMING_SNAKE_CASE__ = sin_theta_t / sin_theta_a
SCREAMING_SNAKE_CASE__ = sa * va + sa * va
if inputs_are_torch:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
return va
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = F.normalize(UpperCamelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE__ = F.normalize(UpperCamelCase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
SCREAMING_SNAKE_CASE__ = value
class lowercase__ ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , UpperCAmelCase_ : CLIPFeatureExtractor , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[str]=None , ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , clip_model=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , coca_model=UpperCAmelCase_ , coca_tokenizer=UpperCAmelCase_ , coca_transform=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCAmelCase_ )
else feature_extractor.size['shortest_edge']
)
SCREAMING_SNAKE_CASE__ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCAmelCase_ )
set_requires_grad(self.clip_model , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
def A_ ( self : Any ):
self.enable_attention_slicing(UpperCAmelCase_ )
def A_ ( self : List[str] ):
set_requires_grad(self.vae , UpperCAmelCase_ )
def A_ ( self : str ):
set_requires_grad(self.vae , UpperCAmelCase_ )
def A_ ( self : Optional[int] ):
set_requires_grad(self.unet , UpperCAmelCase_ )
def A_ ( self : Any ):
set_requires_grad(self.unet , UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ):
# get the original timestep using init_timestep
SCREAMING_SNAKE_CASE__ = min(int(num_inference_steps * strength ) , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A_ ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None ):
if not isinstance(UpperCAmelCase_ , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase_ )}' )
SCREAMING_SNAKE_CASE__ = image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase_ )
]
SCREAMING_SNAKE_CASE__ = torch.cat(UpperCAmelCase_ , dim=0 )
else:
SCREAMING_SNAKE_CASE__ = self.vae.encode(UpperCAmelCase_ ).latent_dist.sample(UpperCAmelCase_ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ = 0.18_215 * init_latents
SCREAMING_SNAKE_CASE__ = init_latents.repeat_interleave(UpperCAmelCase_ , dim=0 )
SCREAMING_SNAKE_CASE__ = randn_tensor(init_latents.shape , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
# get latents
SCREAMING_SNAKE_CASE__ = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = init_latents
return latents
def A_ ( self : Optional[int] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE__ = self.coca_transform(UpperCAmelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE__ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
SCREAMING_SNAKE_CASE__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = self.feature_extractor.preprocess(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
SCREAMING_SNAKE_CASE__ = self.clip_model.get_image_features(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = image_embeddings_clip.repeat_interleave(UpperCAmelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ = latents.detach().requires_grad_()
SCREAMING_SNAKE_CASE__ = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
SCREAMING_SNAKE_CASE__ = self.scheduler.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE__ = torch.sqrt(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.scheduler.sigmas[index]
SCREAMING_SNAKE_CASE__ = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ = 1 / 0.18_215 * sample
SCREAMING_SNAKE_CASE__ = self.vae.decode(UpperCAmelCase_ ).sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.normalize(UpperCAmelCase_ ).to(latents.dtype )
SCREAMING_SNAKE_CASE__ = self.clip_model.get_image_features(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = spherical_dist_loss(UpperCAmelCase_ , UpperCAmelCase_ ).mean() * clip_guidance_scale
SCREAMING_SNAKE_CASE__ = -torch.autograd.grad(UpperCAmelCase_ , UpperCAmelCase_ )[0]
if isinstance(self.scheduler , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = latents.detach() + grads * (sigma**2)
SCREAMING_SNAKE_CASE__ = noise_pred_original
else:
SCREAMING_SNAKE_CASE__ = noise_pred_original - torch.sqrt(UpperCAmelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : float = 0.6 , UpperCAmelCase_ : Optional[int] = 50 , UpperCAmelCase_ : Optional[float] = 7.5 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[float] = 100 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(UpperCAmelCase_ )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(UpperCAmelCase_ , torch.Generator ) and batch_size > 1:
SCREAMING_SNAKE_CASE__ = [generator] + [None] * (batch_size - 1)
SCREAMING_SNAKE_CASE__ = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
SCREAMING_SNAKE_CASE__ = [x[0] for x in coca_is_none if x[1]]
SCREAMING_SNAKE_CASE__ = ', '.join(UpperCAmelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase_ ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
SCREAMING_SNAKE_CASE__ = self.get_image_description(UpperCAmelCase_ )
if style_prompt is None:
if len(UpperCAmelCase_ ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
SCREAMING_SNAKE_CASE__ = self.get_image_description(UpperCAmelCase_ )
# get prompt text embeddings for content and style
SCREAMING_SNAKE_CASE__ = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ = slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ = text_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0 )
# set timesteps
SCREAMING_SNAKE_CASE__ = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
SCREAMING_SNAKE_CASE__ = {}
if accepts_offset:
SCREAMING_SNAKE_CASE__ = 1
self.scheduler.set_timesteps(UpperCAmelCase_ , **UpperCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device )
SCREAMING_SNAKE_CASE__ = timesteps[:1].repeat(UpperCAmelCase_ )
# Preprocess image
SCREAMING_SNAKE_CASE__ = preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ = self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = slerp(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ = content_text_input.input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ = self.tokenizer([''] , padding='max_length' , max_length=UpperCAmelCase_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ = uncond_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
SCREAMING_SNAKE_CASE__ = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device='cpu' , dtype=UpperCAmelCase_ ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
SCREAMING_SNAKE_CASE__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ = eta
# check if the scheduler accepts generator
SCREAMING_SNAKE_CASE__ = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
SCREAMING_SNAKE_CASE__ = generator
with self.progress_bar(total=UpperCAmelCase_ ):
for i, t in enumerate(UpperCAmelCase_ ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.cond_fn(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ = 1 / 0.18_215 * latents
SCREAMING_SNAKE_CASE__ = self.vae.decode(UpperCAmelCase_ ).sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
| 176 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.txt"""}
__snake_case = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
__snake_case = {
"""facebook/esm2_t6_8M_UR50D""": 10_24,
"""facebook/esm2_t12_35M_UR50D""": 10_24,
}
def _lowercase ( UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
with open(UpperCamelCase_ , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase__ ( _UpperCAmelCase ):
A__ : Tuple =VOCAB_FILES_NAMES
A__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
A__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Any =["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : Optional[Any]="<cls>" , UpperCAmelCase_ : List[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="<mask>" , UpperCAmelCase_ : Optional[int]="<eos>" , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = load_vocab_file(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE__ = unk_token
SCREAMING_SNAKE_CASE__ = cls_token
SCREAMING_SNAKE_CASE__ = pad_token
SCREAMING_SNAKE_CASE__ = mask_token
SCREAMING_SNAKE_CASE__ = eos_token
SCREAMING_SNAKE_CASE__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def A_ ( self : Any , UpperCAmelCase_ : int ):
return self._id_to_token.get(UpperCAmelCase_ , self.unk_token )
def A_ ( self : Dict , UpperCAmelCase_ : str ):
return self._token_to_id.get(UpperCAmelCase_ , self._token_to_id.get(self.unk_token ) )
def A_ ( self : List[str] , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any] ):
return text.split()
def A_ ( self : str , UpperCAmelCase_ : Optional[Any]=False ):
return len(self._id_to_token )
def A_ ( self : Union[str, Any] ):
return {token: i for i, token in enumerate(self.all_tokens )}
def A_ ( self : Any , UpperCAmelCase_ : str ):
return self._token_to_id.get(UpperCAmelCase_ , self._token_to_id.get(self.unk_token ) )
def A_ ( self : List[str] , UpperCAmelCase_ : int ):
return self._id_to_token.get(UpperCAmelCase_ , self.unk_token )
def A_ ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A_ ( self : Dict , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(UpperCAmelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase_ ) + [1]
return mask
def A_ ( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' )
with open(UpperCAmelCase_ , 'w' ) as f:
f.write('\n'.join(self.all_tokens ) )
return (vocab_file,)
@property
def A_ ( self : int ):
return self.get_vocab_size(with_added_tokens=UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : Union[List[str], List[AddedToken]] , UpperCAmelCase_ : bool = False ):
return super()._add_tokens(UpperCAmelCase_ , special_tokens=UpperCAmelCase_ )
| 176 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class lowercase__ ( lowercase ):
lowercase__ = """imagegpt"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int ,lowerCamelCase__ : Union[str, Any]=512 + 1 ,lowerCamelCase__ : str=32 * 32 ,lowerCamelCase__ : Dict=512 ,lowerCamelCase__ : str=24 ,lowerCamelCase__ : Optional[int]=8 ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : str="quick_gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : str=1E-5 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Tuple=False ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = n_positions
_UpperCamelCase : List[Any] = n_embd
_UpperCamelCase : Optional[Any] = n_layer
_UpperCamelCase : Dict = n_head
_UpperCamelCase : Dict = n_inner
_UpperCamelCase : str = activation_function
_UpperCamelCase : List[str] = resid_pdrop
_UpperCamelCase : Optional[int] = embd_pdrop
_UpperCamelCase : Optional[Any] = attn_pdrop
_UpperCamelCase : List[Any] = layer_norm_epsilon
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Any = scale_attn_weights
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : int = scale_attn_by_inverse_layer_idx
_UpperCamelCase : Any = reorder_and_upcast_attn
_UpperCamelCase : int = tie_word_embeddings
super().__init__(tie_word_embeddings=lowerCamelCase__ ,**lowerCamelCase__ )
class lowercase__ ( lowercase ):
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : "FeatureExtractionMixin" ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional["TensorType"] = None ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 32 ,):
'''simple docstring'''
_UpperCamelCase : Any = self._generate_dummy_images(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = dict(preprocessor(images=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ) )
return inputs
| 236 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
# Initialise PyTorch model
_UpperCamelCase : Any = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(f'Building PyTorch model from configuration: {config}' )
_UpperCamelCase : int = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 236 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def __UpperCAmelCase ( __lowerCamelCase ) -> bytes:
lowercase__ : List[str] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
lowercase__ : Optional[int] = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(__lowerCamelCase ).content
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter Video/IGTV url: ').strip()
lowerCAmelCase_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 16 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCamelCase : Optional[Any] = HfArgumentParser(InitializationArguments)
__lowerCamelCase : int = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCamelCase : Any = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCamelCase : Tuple = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
__lowerCamelCase : Any = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCamelCase : int = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 286 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ):
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return compound_interest(
snake_case_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 19 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> List[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = num_stages
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = out_features
_snake_case = num_labels
_snake_case = scope
_snake_case = num_stages
def lowercase (self ) -> List[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase (self ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase (self ) -> Any:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
_snake_case = UperNetForSemanticSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase (self ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
(
(
_snake_case
), (
_snake_case
), (
_snake_case
),
) = config_and_inputs
_snake_case = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase_ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> Optional[Any]:
_snake_case = UperNetModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def lowercase (self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase (self ) -> Union[str, Any]:
return
def lowercase (self ) -> Union[str, Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowercase (self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def lowercase (self ) -> List[str]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase (self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> str:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> int:
pass
def lowercase (self ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = _config_zero_init(UpperCAmelCase )
_snake_case = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_snake_case = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowercase (self ) -> Optional[Any]:
pass
@slow
def lowercase (self ) -> Tuple:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_snake_case = Image.open(_SCREAMING_SNAKE_CASE ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Any:
_snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCAmelCase )
_snake_case = prepare_img()
_snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
def lowercase (self ) -> Any:
_snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCAmelCase )
_snake_case = prepare_img()
_snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) | 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 368 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A__ ( ) -> Union[str, Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCamelCase_: Optional[int] = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A__ ( ) -> Union[str, Any]:
assert _test_patching.open is open
UpperCamelCase_: List[Any] = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , lowerCamelCase ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def A__ ( ) -> Optional[Any]:
# pandas.read_csv is not present in _test_patching
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , lowerCamelCase ):
pass
def A__ ( ) -> Any:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
UpperCamelCase_: List[Any] = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , lowerCamelCase ) is None
with patch_submodule(_test_patching , """len""" , lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A__ ( ) -> Any:
UpperCamelCase_: Dict = """__test_patch_submodule_start_and_stop_mock__"""
UpperCamelCase_: List[str] = patch_submodule(_test_patching , """open""" , lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A__ ( ) -> List[str]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_successive_join__"""
UpperCamelCase_: Any = """__test_patch_submodule_successive_dirname__"""
UpperCamelCase_: Dict = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: Dict = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
| 223 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Generator[tuple[str, ...], None, None]:
SCREAMING_SNAKE_CASE__ : List[Any] = iter(__lowerCAmelCase )
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = tuple(itertools.islice(__lowerCAmelCase , __lowerCAmelCase ) )
if not chunk:
return
yield chunk
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
SCREAMING_SNAKE_CASE__ : Tuple = """"""
if len(__lowerCAmelCase ) < 2:
return dirty
for i in range(len(__lowerCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__lowerCAmelCase ) & 1:
clean += "X"
return clean
def _lowercase ( __lowerCAmelCase ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
SCREAMING_SNAKE_CASE__ : str = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
SCREAMING_SNAKE_CASE__ : Optional[int] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__lowerCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__lowerCAmelCase )
return table
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple = generate_table(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_input(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = divmod(table.index(__lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : str = generate_table(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = divmod(table.index(__lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 132 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = StableDiffusionInstructPixaPixPipeline
_SCREAMING_SNAKE_CASE :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
_SCREAMING_SNAKE_CASE :Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_SCREAMING_SNAKE_CASE :int = IMAGE_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE :List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE__ : str = CLIPTextModel(_a )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _a , _a=0 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Dict = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" )
if str(_a ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(_a )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : str = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : int = """french fries"""
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(**_a , negative_prompt=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : List[str] = [inputs["""prompt"""]] * 2
SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.from_numpy(_a ).unsqueeze(0 ).to(_a )
SCREAMING_SNAKE_CASE__ : Any = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ : Any = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ : List[str] = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = [round(_a , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(_a ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = VaeImageProcessor(do_resize=_a , do_normalize=_a )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : str = pipe(**self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" ) )[0]
SCREAMING_SNAKE_CASE__ : Tuple = components["""vae"""]
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inputs_by_type(_a , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ : Any = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ : Any = pipe(**_a )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_a , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _a=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : str = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
SCREAMING_SNAKE_CASE__ : Dict = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : str = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**_a ).images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Any = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
def callback_fn(_a , _a , _a ) -> None:
SCREAMING_SNAKE_CASE__ : List[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : str = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
SCREAMING_SNAKE_CASE__ : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_inputs()
pipe(**_a , callback=_a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_a , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : str = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Any = pipe(**_a )
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""image"""].resize((504, 504) )
SCREAMING_SNAKE_CASE__ : str = """timbrooks/instruct-pix2pix"""
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 132 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : str = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 318 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def _snake_case ( snake_case__ : Iterable[str] , snake_case__ : int ):
A = iter(snake_case__ )
while True:
A = tuple(itertools.islice(snake_case__ , snake_case__ ) )
if not chunk:
return
yield chunk
def _snake_case ( snake_case__ : str ):
A = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
A = ''
if len(snake_case__ ) < 2:
return dirty
for i in range(len(snake_case__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(snake_case__ ) & 1:
clean += "X"
return clean
def _snake_case ( snake_case__ : str ):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
A = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
A = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(snake_case__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(snake_case__ )
return table
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = generate_table(snake_case__ )
A = prepare_input(snake_case__ )
A = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(snake_case__ , 2 ):
A , A = divmod(table.index(snake_case__ ) , 5 )
A , A = divmod(table.index(snake_case__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = generate_table(snake_case__ )
A = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(snake_case__ , 2 ):
A , A = divmod(table.index(snake_case__ ) , 5 )
A , A = divmod(table.index(snake_case__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ : str = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure) | 225 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["image_processor", "tokenizer"]
UpperCAmelCase_ ="BlipImageProcessor"
UpperCAmelCase_ =("BertTokenizer", "BertTokenizerFast")
def __init__( self , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = False
super().__init__(_A , _A )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
SCREAMING_SNAKE_CASE_ = self.tokenizer
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
return text_encoding
# add pixel_values
SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
else:
SCREAMING_SNAKE_CASE_ = None
if text_encoding is not None:
encoding_image_processor.update(_A )
return encoding_image_processor
def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_A , **_A )
def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
return self.tokenizer.decode(*_A , **_A )
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 367 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 257 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ : List[str] = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , __a):
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = load_tool("""text-question-answering""" )
self.tool.setup()
_UpperCAmelCase : Tuple = load_tool("""text-question-answering""" , remote=_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.remote_tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.remote_tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
| 246 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_UpperCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
_UpperCAmelCase : List[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_UpperCAmelCase : Dict = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
# load decoder from hub
_UpperCAmelCase : List[Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def __snake_case ( self , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_A , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = feature_extractor(_A , return_tensors="""np""" )
_UpperCAmelCase : int = processor(_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[Any] = """This is a test string"""
_UpperCAmelCase : Optional[Any] = processor(text=_A )
_UpperCAmelCase : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , _A=(2, 10, 16) , _A=77 ) -> Union[str, Any]:
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase : Optional[int] = processor.decode(_A )
_UpperCAmelCase : str = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __snake_case ( self , _A ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : int = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
_UpperCAmelCase : Dict = processor.batch_decode(_A , _A )
_UpperCAmelCase : Tuple = list(_A )
with get_context("""fork""" ).Pool() as p:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(_A , _A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 15
_UpperCAmelCase : Dict = -20.0
_UpperCAmelCase : List[str] = -4.0
_UpperCAmelCase : Any = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Any = decoded_processor_out.text
_UpperCAmelCase : Any = list(_A )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : str = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : List[str] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1e-3 ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : Any = 2.0
_UpperCAmelCase : Union[str, Any] = 5.0
_UpperCAmelCase : List[Any] = -20.0
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
_UpperCAmelCase : Tuple = decoded_processor_out.text
_UpperCAmelCase : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
_UpperCAmelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _A )
_UpperCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_A )
_UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Union[str, Any] = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = processor_wavaveca(_A , return_tensors="""np""" )
_UpperCAmelCase : Any = processor_auto(_A , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
_UpperCAmelCase : Dict = processor_wavaveca.batch_decode(_A )
_UpperCAmelCase : Optional[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def __snake_case ( _A , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = self._get_dummy_logits()[0]
_UpperCAmelCase : Tuple = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ) -> str:
'''simple docstring'''
import torch
_UpperCAmelCase : List[str] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_A )
_UpperCAmelCase : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_UpperCAmelCase : List[Any] = iter(_A )
_UpperCAmelCase : Optional[Any] = next(_A )
_UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_UpperCAmelCase : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(_A ).logits.cpu().numpy()
_UpperCAmelCase : Union[str, Any] = processor.decode(logits[0] , output_word_offsets=_A )
_UpperCAmelCase : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase : Optional[int] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_UpperCAmelCase : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , _A )
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , output.text )
# output times
_UpperCAmelCase : List[str] = torch.tensor(self.get_from_offsets(_A , """start_time""" ) )
_UpperCAmelCase : Any = torch.tensor(self.get_from_offsets(_A , """end_time""" ) )
# fmt: off
_UpperCAmelCase : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
| 246 | 1 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
def get_masked_lm_array(lowerCAmelCase ):
UpperCAmelCase = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_array(lowerCAmelCase ):
UpperCAmelCase = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_layer_array(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_attention_layer_array(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
UpperCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = array.reshape(lowerCAmelCase )
if "kernel" in name:
UpperCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
print(F'''Loading model based on config from {config_path}...''' )
UpperCAmelCase = BertConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase = BertForMaskedLM(lowerCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
UpperCAmelCase = model.bert.encoder.layer[layer_index]
# Self-attention
UpperCAmelCase = layer.attention.self
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
UpperCAmelCase = layer.attention.output
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
UpperCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/gamma""" )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/beta""" )
# Intermediate
UpperCAmelCase = layer.intermediate
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/kernel""" )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/bias""" )
# Output
UpperCAmelCase = layer.output
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/kernel""" )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/bias""" )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/gamma""" )
UpperCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/beta""" )
# Embeddings
UpperCAmelCase = get_encoder_array("""_position_embedding_layer/embeddings""" )
UpperCAmelCase = get_encoder_array("""_type_embedding_layer/embeddings""" )
UpperCAmelCase = get_encoder_array("""_embedding_norm_layer/gamma""" )
UpperCAmelCase = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
UpperCAmelCase = model.cls.predictions.transform
UpperCAmelCase = get_masked_lm_array("""dense/kernel""" )
UpperCAmelCase = get_masked_lm_array("""dense/bias""" )
UpperCAmelCase = get_masked_lm_array("""layer_norm/gamma""" )
UpperCAmelCase = get_masked_lm_array("""layer_norm/beta""" )
UpperCAmelCase = get_masked_lm_array("""embedding_table""" )
# Pooling
UpperCAmelCase = BertPooler(config=lowerCAmelCase )
UpperCAmelCase = get_encoder_array("""_pooler_layer/kernel""" )
UpperCAmelCase = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCAmelCase )
# Integration test - should load without any errors ;)
UpperCAmelCase = BertForMaskedLM.from_pretrained(lowerCAmelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 248 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase_ ( a_ ):
_A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A : ClassVar[Features] = Features({'image': Image()} )
_A : ClassVar[Features] = Features({'labels': ClassLabel} )
_A : str = "image"
_A : str = "labels"
def UpperCamelCase_ ( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 248 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'vit_mae'
def __init__( self : Optional[int], lowerCamelCase : Any=768, lowerCamelCase : Any=12, lowerCamelCase : Optional[Any]=12, lowerCamelCase : str=3072, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=0.0, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Union[str, Any]=1E-12, lowerCamelCase : Dict=224, lowerCamelCase : str=16, lowerCamelCase : str=3, lowerCamelCase : List[str]=True, lowerCamelCase : List[str]=16, lowerCamelCase : Tuple=512, lowerCamelCase : Union[str, Any]=8, lowerCamelCase : Optional[int]=2048, lowerCamelCase : Union[str, Any]=0.75, lowerCamelCase : Any=False, **lowerCamelCase : Dict, )-> str:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : Tuple =hidden_size
lowerCamelCase__ : Dict =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Optional[Any] =intermediate_size
lowerCamelCase__ : str =hidden_act
lowerCamelCase__ : Optional[int] =hidden_dropout_prob
lowerCamelCase__ : Optional[Any] =attention_probs_dropout_prob
lowerCamelCase__ : Dict =initializer_range
lowerCamelCase__ : Tuple =layer_norm_eps
lowerCamelCase__ : Dict =image_size
lowerCamelCase__ : Optional[int] =patch_size
lowerCamelCase__ : Optional[int] =num_channels
lowerCamelCase__ : Optional[int] =qkv_bias
lowerCamelCase__ : Union[str, Any] =decoder_num_attention_heads
lowerCamelCase__ : Any =decoder_hidden_size
lowerCamelCase__ : Any =decoder_num_hidden_layers
lowerCamelCase__ : Dict =decoder_intermediate_size
lowerCamelCase__ : Union[str, Any] =mask_ratio
lowerCamelCase__ : List[Any] =norm_pix_loss
| 238 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=None , ):
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__ : Any =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase__ : str =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase__ : Dict =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : str =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str, lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : Dict=7, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : int=99, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : List[str]=2, lowerCamelCase : int=4, lowerCamelCase : Tuple=4, lowerCamelCase : Optional[Any]="gelu", lowerCamelCase : List[str]=0.1, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=32, lowerCamelCase : List[str]=2, lowerCamelCase : Tuple=1, lowerCamelCase : Optional[int]=0, lowerCamelCase : int=0.02, )-> Optional[Any]:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Dict =batch_size
lowerCamelCase__ : Optional[int] =seq_length
lowerCamelCase__ : Any =is_training
lowerCamelCase__ : Optional[int] =use_labels
lowerCamelCase__ : List[str] =vocab_size
lowerCamelCase__ : List[Any] =hidden_size
lowerCamelCase__ : List[Any] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : List[Any] =intermediate_size
lowerCamelCase__ : Union[str, Any] =hidden_act
lowerCamelCase__ : Optional[Any] =hidden_dropout_prob
lowerCamelCase__ : Tuple =attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] =max_position_embeddings
lowerCamelCase__ : List[Any] =eos_token_id
lowerCamelCase__ : Tuple =pad_token_id
lowerCamelCase__ : Union[str, Any] =bos_token_id
lowerCamelCase__ : List[Any] =initializer_range
def snake_case ( self : Optional[Any] )-> str:
lowerCamelCase__ : Dict =np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
lowerCamelCase__ : Union[str, Any] =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa )), -1 )
lowerCamelCase__ : Dict =shift_tokens_right(lowerCamelCase, 1, 2 )
lowerCamelCase__ : Optional[Any] =BlenderbotConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=lowerCamelCase, )
lowerCamelCase__ : List[str] =prepare_blenderbot_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
return config, inputs_dict
def snake_case ( self : str )-> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : Any =self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self : int, lowerCamelCase : Tuple, lowerCamelCase : Dict, lowerCamelCase : Tuple )-> Optional[Any]:
lowerCamelCase__ : Union[str, Any] =20
lowerCamelCase__ : Optional[int] =model_class_name(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ : Any =model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Dict =jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
lowerCamelCase__ : List[Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase__ : int =model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : Optional[int] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
lowerCamelCase__ : Union[str, Any] =model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : int =model.decode(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Dict =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
def snake_case ( self : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str )-> List[str]:
lowerCamelCase__ : List[Any] =20
lowerCamelCase__ : List[Any] =model_class_name(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase__ , lowerCamelCase__ : str =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ : Tuple =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
lowerCamelCase__ : List[str] =model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
lowerCamelCase__ : List[Any] =model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : List[str] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
lowerCamelCase__ : Optional[Any] =model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
lowerCamelCase__ : Optional[Any] =model.decode(lowerCamelCase, lowerCamelCase, decoder_attention_mask=lowerCamelCase )
lowerCamelCase__ : List[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
_a = 9_9
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
lowerCamelCase__ : Any =input_ids.shape[0]
lowerCamelCase__ : Any =BlenderbotConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def snake_case ( self : Any )-> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =self._get_config_and_data()
lowerCamelCase__ : int =FlaxBlenderbotForConditionalGeneration(lowerCamelCase )
lowerCamelCase__ : str =lm_model(input_ids=lowerCamelCase )
lowerCamelCase__ : List[Any] =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, lowerCamelCase )
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : Union[str, Any] =BlenderbotConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
lowerCamelCase__ : Union[str, Any] =FlaxBlenderbotForConditionalGeneration(lowerCamelCase )
lowerCamelCase__ : List[Any] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa )
lowerCamelCase__ : Optional[Any] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa )
lowerCamelCase__ : Optional[int] =lm_model(input_ids=lowerCamelCase, decoder_input_ids=lowerCamelCase )
lowerCamelCase__ : List[str] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape, lowerCamelCase )
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
lowerCamelCase__ : Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa )
lowerCamelCase__ : Optional[Any] =shift_tokens_right(lowerCamelCase, 1, 2 )
lowerCamelCase__ : str =np.equal(lowerCamelCase, 1 ).astype(np.floataa ).sum()
lowerCamelCase__ : List[str] =np.equal(lowerCamelCase, 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape, input_ids.shape )
self.assertEqual(lowerCamelCase, n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
_a = True
_a = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_a = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case ( self : Union[str, Any] )-> List[str]:
lowerCamelCase__ : str =FlaxBlenderbotModelTester(self )
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[str] )-> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : List[Any] =self._prepare_for_class(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : int =model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase : int, lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : List[str] ):
return model.encode(input_ids=lowerCamelCase, attention_mask=lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase__ : Any =encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase__ : Dict =encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def snake_case ( self : List[str] )-> Dict:
lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase )
lowerCamelCase__ : List[Any] =model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
lowerCamelCase__ : Optional[int] ={
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Tuple ):
return model.decode(
decoder_input_ids=lowerCamelCase, decoder_attention_mask=lowerCamelCase, encoder_outputs=lowerCamelCase, )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase__ : Union[str, Any] =decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase__ : Optional[Any] =decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def snake_case ( self : Tuple )-> Tuple:
for model_class_name in self.all_model_classes:
lowerCamelCase__ : int =model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__ : Union[str, Any] =np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase__ : Optional[Any] =model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skipUnless(jax_device != '''cpu''', '''3B test too slow on CPU.''' )
@slow
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : List[Any] ={'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
lowerCamelCase__ : Optional[int] ={'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCamelCase__ : Tuple =FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''', from_pt=lowerCamelCase )
lowerCamelCase__ : int =BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCamelCase__ : str =['''Sam''']
lowerCamelCase__ : Union[str, Any] =tokenizer(lowerCamelCase, return_tensors='''jax''' )
lowerCamelCase__ : Tuple =model.generate(**lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Tuple ='''Sam is a great name. It means "sun" in Gaelic.'''
lowerCamelCase__ : Union[str, Any] =tokenizer.batch_decode(lowerCamelCase, **lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
| 238 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[Any] = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ : Any = frozenset([] )
def lowercase__ ( self : int ):
lowerCAmelCase : str = 32
lowerCAmelCase : List[str] = embedder_hidden_size
# image encoding components
lowerCAmelCase : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase : Dict = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
lowerCAmelCase : List[str] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = AutoencoderKL()
lowerCAmelCase : Optional[int] = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Optional[int]=True ):
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCAmelCase : Union[str, Any] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase : Optional[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if pil_image:
lowerCAmelCase : str = input_image * 0.5 + 0.5
lowerCAmelCase : str = input_image.clamp(0 , 1 )
lowerCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase : Any = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase__ ( self : Any ):
lowerCAmelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components()
lowerCAmelCase : Any = StableUnCLIPImgaImgPipeline(**UpperCAmelCase_ )
lowerCAmelCase : int = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
inputs.update({'image_embeds': None} )
lowerCAmelCase : Tuple = sd_pipe(**UpperCAmelCase_ ).images
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : Tuple = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase__ ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : int ):
lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
lowerCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
lowerCAmelCase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : Tuple = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
lowerCAmelCase : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
lowerCAmelCase : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : Any = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
lowerCAmelCase : Optional[Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : Union[str, Any] = pipe(
UpperCAmelCase_ , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 370 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_UpperCAmelCase : Any = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_UpperCAmelCase : str = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_UpperCAmelCase : List[str] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_UpperCAmelCase : Optional[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_UpperCAmelCase : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowerCAmelCase ( datasets.Metric):
def SCREAMING_SNAKE_CASE ( self: str ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Optional[Any]=[1, 10, 1_00] , _lowerCAmelCase: int=4 , _lowerCAmelCase: str=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=_lowerCAmelCase ) as executor:
lowercase :Optional[int] = []
lowercase :Optional[Any] = Counter()
lowercase :str = 0
lowercase :Any = defaultdict(_lowerCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(_lowerCAmelCase , _lowerCAmelCase ) ):
for candidate in candidates:
lowercase :int = candidate + "\n" + test_case
lowercase :List[str] = (test_program, timeout, task_id, completion_id[task_id])
lowercase :Union[str, Any] = executor.submit(_lowerCAmelCase , *_lowerCAmelCase )
futures.append(_lowerCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_lowerCAmelCase ):
lowercase :Dict = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
lowercase , lowercase :Union[str, Any] = [], []
for result in results.values():
result.sort()
lowercase :Tuple = [r[1]["passed"] for r in result]
total.append(len(_lowerCAmelCase ) )
correct.append(sum(_lowerCAmelCase ) )
lowercase :Dict = np.array(_lowerCAmelCase )
lowercase :Optional[Any] = np.array(_lowerCAmelCase )
lowercase :Tuple = k
lowercase :Any = {F"pass@{k}": estimate_pass_at_k(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def estimator(lowerCamelCase, lowerCamelCase, lowerCamelCase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) )
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase :Tuple = itertools.repeat(lowerCamelCase, len(lowerCamelCase ) )
else:
assert len(lowerCamelCase ) == len(lowerCamelCase )
lowercase :Optional[Any] = iter(lowerCamelCase )
return np.array([estimator(int(lowerCamelCase ), int(lowerCamelCase ), lowerCamelCase ) for n, c in zip(lowerCamelCase, lowerCamelCase )] )
| 236 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : Dict = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=8 ):
lowercase :List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase :List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Tuple , _lowerCAmelCase: UNetaDConditionModel , _lowerCAmelCase: DDPMScheduler , _lowerCAmelCase: VQModel , ):
super().__init__()
self.register_modules(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
lowercase :List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[int] ):
if latents is None:
lowercase :int = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
lowercase :Optional[Any] = latents.to(_lowerCAmelCase )
lowercase :int = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase :List[Any] = torch.device(F"cuda:{gpu_id}" )
lowercase :List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: Dict=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase :List[Any] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase :List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase , lowercase :Dict = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
lowercase :Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self: int ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self: str , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: int = 5_12 , _lowerCAmelCase: int = 5_12 , _lowerCAmelCase: int = 1_00 , _lowerCAmelCase: float = 4.0 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , ):
lowercase :str = self._execution_device
lowercase :List[str] = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Any = torch.cat(_lowerCAmelCase , dim=0 )
lowercase :Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :List[Any] = torch.cat(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
lowercase :int = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
lowercase :Any = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
lowercase :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
lowercase :Optional[Any] = self.scheduler.timesteps
lowercase :Tuple = self.unet.config.in_channels
lowercase , lowercase :Optional[int] = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
lowercase :List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase :List[str] = {"image_embeds": image_embeds}
lowercase :List[Any] = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
lowercase , lowercase :List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase , lowercase :Any = noise_pred.chunk(2 )
lowercase , lowercase :Dict = variance_pred.chunk(2 )
lowercase :str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase :Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase , lowercase :str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase :Tuple = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
lowercase :Dict = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase :Any = image * 0.5 + 0.5
lowercase :Tuple = image.clamp(0 , 1 )
lowercase :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase :List[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 236 | 1 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase__ : str = logging.getLogger(__name__)
def __lowercase ( _a , _a ):
snake_case_ : Union[str, Any] = np.argmax(_a , axis=1 )
return np.sum(outputs == labels )
def __lowercase ( _a ):
with open(_a , encoding='''utf_8''' ) as f:
snake_case_ : List[str] = csv.reader(_a )
snake_case_ : int = []
next(_a ) # skip the first line
for line in tqdm(_a ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def __lowercase ( _a , _a , _a , _a , _a , _a ):
snake_case_ : Optional[Any] = []
for dataset in encoded_datasets:
snake_case_ : str = len(_a )
snake_case_ : Optional[Any] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
snake_case_ : Any = np.zeros((n_batch, 2) , dtype=np.intaa )
snake_case_ : Optional[int] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
snake_case_ : int = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_a ):
snake_case_ : Optional[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
snake_case_ : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
snake_case_ : Optional[Any] = with_conta
snake_case_ : Tuple = with_conta
snake_case_ : Tuple = len(_a ) - 1
snake_case_ : List[str] = len(_a ) - 1
snake_case_ : List[Any] = with_conta
snake_case_ : Any = with_conta
snake_case_ : Dict = mc_label
snake_case_ : str = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_a ) for t in all_inputs ) )
return tensor_datasets
def __lowercase ( ):
snake_case_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_a , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_a , type=_a , required=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_a , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_a , default='''''' )
parser.add_argument('''--seed''' , type=_a , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_a , default=3 )
parser.add_argument('''--train_batch_size''' , type=_a , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_a , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_a , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_a , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_a , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_a , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_a , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_a , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_a , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_a , default=0.9 )
parser.add_argument('''--n_valid''' , type=_a , default=374 )
parser.add_argument('''--server_ip''' , type=_a , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_a , default='''''' , help='''Can be used for distant debugging.''' )
snake_case_ : Dict = parser.parse_args()
print(_a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case_ : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ : Union[str, Any] = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_a , _a ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case_ : Union[str, Any] = ['''_start_''', '''_delimiter_''', '''_classify_''']
snake_case_ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_a )
snake_case_ : str = tokenizer.convert_tokens_to_ids(_a )
snake_case_ : Any = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_a ) )
model.to(_a )
# Load and encode the datasets
def tokenize_and_encode(_a ):
if isinstance(_a , _a ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_a ) )
elif isinstance(_a , _a ):
return obj
return [tokenize_and_encode(_a ) for o in obj]
logger.info('''Encoding dataset...''' )
snake_case_ : int = load_rocstories_dataset(args.train_dataset )
snake_case_ : List[str] = load_rocstories_dataset(args.eval_dataset )
snake_case_ : str = (train_dataset, eval_dataset)
snake_case_ : Optional[int] = tokenize_and_encode(_a )
# Compute the max input length for the Transformer
snake_case_ : str = model.config.n_positions // 2 - 2
snake_case_ : str = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case_ : Any = min(_a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ : List[str] = pre_process_datasets(_a , _a , _a , *_a )
snake_case_, snake_case_ : Dict = tensor_datasets[0], tensor_datasets[1]
snake_case_ : int = TensorDataset(*_a )
snake_case_ : Optional[Any] = RandomSampler(_a )
snake_case_ : int = DataLoader(_a , sampler=_a , batch_size=args.train_batch_size )
snake_case_ : List[Any] = TensorDataset(*_a )
snake_case_ : str = SequentialSampler(_a )
snake_case_ : Union[str, Any] = DataLoader(_a , sampler=_a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ : str = args.max_steps
snake_case_ : Tuple = args.max_steps // (len(_a ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ : Optional[Any] = len(_a ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ : Optional[int] = list(model.named_parameters() )
snake_case_ : Any = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
snake_case_ : Any = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
snake_case_ : Any = AdamW(_a , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ : Union[str, Any] = get_linear_schedule_with_warmup(
_a , num_warmup_steps=args.warmup_steps , num_training_steps=_a )
if args.do_train:
snake_case_, snake_case_, snake_case_ : List[str] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
snake_case_ : str = 0
snake_case_ : Dict = 0
snake_case_ : List[str] = tqdm(_a , desc='''Training''' )
for step, batch in enumerate(_a ):
snake_case_ : str = tuple(t.to(_a ) for t in batch )
snake_case_, snake_case_, snake_case_, snake_case_ : int = batch
snake_case_ : int = model(_a , mc_token_ids=_a , lm_labels=_a , mc_labels=_a )
snake_case_ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ : Dict = '''Training loss: {:.2e} lr: {:.2e}'''.format(_a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ : Optional[Any] = model.module if hasattr(_a , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ : Optional[Any] = os.path.join(args.output_dir , _a )
snake_case_ : Optional[int] = os.path.join(args.output_dir , _a )
torch.save(model_to_save.state_dict() , _a )
model_to_save.config.to_json_file(_a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ : List[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ : int = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_a )
if args.do_eval:
model.eval()
snake_case_, snake_case_ : Dict = 0, 0
snake_case_, snake_case_ : List[Any] = 0, 0
for batch in tqdm(_a , desc='''Evaluating''' ):
snake_case_ : Dict = tuple(t.to(_a ) for t in batch )
snake_case_, snake_case_, snake_case_, snake_case_ : Optional[Any] = batch
with torch.no_grad():
snake_case_, snake_case_, snake_case_, snake_case_ : Dict = model(
_a , mc_token_ids=_a , lm_labels=_a , mc_labels=_a )
snake_case_ : List[Any] = mc_logits.detach().cpu().numpy()
snake_case_ : str = mc_labels.to('''cpu''' ).numpy()
snake_case_ : List[str] = accuracy(_a , _a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ : Union[str, Any] = eval_loss / nb_eval_steps
snake_case_ : str = eval_accuracy / nb_eval_examples
snake_case_ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ : Tuple = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
snake_case_ : str = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_a , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _a , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 155 |
"""simple docstring"""
import os
def __lowercase ( _a ):
snake_case_ : Tuple = len(grid[0] )
snake_case_ : Optional[int] = len(_a )
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : List[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_a ):
for j in range(n_rows - 3 ):
snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case_ : Dict = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
snake_case_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case_ : List[str] = max(
_a , _a , _a , _a )
if max_product > largest:
snake_case_ : str = max_product
return largest
def __lowercase ( ):
snake_case_ : Tuple = []
with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
return largest_product(_a )
if __name__ == "__main__":
print(solution())
| 155 | 1 |
'''simple docstring'''
A_ = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
A_ = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
A_ = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
A_ = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
A_ = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
A_ = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
A_ = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
A_ = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 139 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def A_ ( snake_case ):
return np.maximum(0 , snake_case )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 139 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = tempfile.mkdtemp()
# fmt: off
__magic_name__ : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__magic_name__ : Tuple = dict(zip(_a , range(len(_a ) ) ) )
__magic_name__ : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__magic_name__ : Dict = {"unk_token": "<unk>"}
__magic_name__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
__magic_name__ : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__magic_name__ : Dict = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_a , _a )
def SCREAMING_SNAKE_CASE ( self , **_a ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE ( self , **_a ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE ( self , **_a ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__magic_name__ : Optional[Any] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.get_tokenizer()
__magic_name__ : List[str] = self.get_rust_tokenizer()
__magic_name__ : Dict = self.get_image_processor()
__magic_name__ : Dict = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
__magic_name__ : List[Any] = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__magic_name__ : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
__magic_name__ : Dict = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.get_image_processor()
__magic_name__ : int = self.get_tokenizer()
__magic_name__ : Optional[int] = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__magic_name__ : Optional[int] = self.prepare_image_inputs()
__magic_name__ : Optional[Any] = image_processor(_a , return_tensors="np" )
__magic_name__ : Tuple = processor(images=_a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.get_image_processor()
__magic_name__ : Any = self.get_tokenizer()
__magic_name__ : Dict = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__magic_name__ : Any = "lower newer"
__magic_name__ : str = processor(text=_a )
__magic_name__ : Optional[Any] = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : Dict = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__magic_name__ : str = "lower newer"
__magic_name__ : str = self.prepare_image_inputs()
__magic_name__ : int = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.get_image_processor()
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : List[Any] = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__magic_name__ : Union[str, Any] = self.prepare_image_inputs()
__magic_name__ : str = self.prepare_image_inputs()
__magic_name__ : List[str] = processor(images=_a , visual_prompt=_a )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = self.get_image_processor()
__magic_name__ : Any = self.get_tokenizer()
__magic_name__ : Tuple = CLIPSegProcessor(tokenizer=_a , image_processor=_a )
__magic_name__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ : str = processor.batch_decode(_a )
__magic_name__ : List[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
| 364 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
class _snake_case :
UpperCamelCase__ = 42
UpperCamelCase__ = None
@staticmethod
def SCREAMING_SNAKE_CASE ( ):
raise NotImplementedError
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
raise NotImplementedError
def SCREAMING_SNAKE_CASE ( self , _a ):
raise NotImplementedError
def SCREAMING_SNAKE_CASE ( self ):
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ):
return f'''`pip install {cls.pip_package or cls.name}`'''
class _snake_case ( snake_case ):
UpperCamelCase__ = 'optuna'
@staticmethod
def SCREAMING_SNAKE_CASE ( ):
return is_optuna_available()
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
return run_hp_search_optuna(_a , _a , _a , **_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return default_hp_space_optuna(_a )
class _snake_case ( snake_case ):
UpperCamelCase__ = 'ray'
UpperCamelCase__ = '\'ray[tune]\''
@staticmethod
def SCREAMING_SNAKE_CASE ( ):
return is_ray_available()
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
return run_hp_search_ray(_a , _a , _a , **_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return default_hp_space_ray(_a )
class _snake_case ( snake_case ):
UpperCamelCase__ = 'sigopt'
@staticmethod
def SCREAMING_SNAKE_CASE ( ):
return is_sigopt_available()
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
return run_hp_search_sigopt(_a , _a , _a , **_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return default_hp_space_sigopt(_a )
class _snake_case ( snake_case ):
UpperCamelCase__ = 'wandb'
@staticmethod
def SCREAMING_SNAKE_CASE ( ):
return is_wandb_available()
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
return run_hp_search_wandb(_a , _a , _a , **_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return default_hp_space_wandb(_a )
snake_case : int = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
__magic_name__ : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_snake_case ) > 0:
__magic_name__ : Dict = available_backends[0].name
if len(_snake_case ) > 1:
logger.info(
F'''{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 41 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = '''camembert'''
def __init__( self : int , _snake_case : Optional[Any]=3_0522 , _snake_case : str=768 , _snake_case : Tuple=12 , _snake_case : List[str]=12 , _snake_case : Optional[int]=3072 , _snake_case : Optional[Any]="gelu" , _snake_case : Tuple=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : List[Any]=512 , _snake_case : Any=2 , _snake_case : Optional[int]=0.02 , _snake_case : List[Any]=1E-1_2 , _snake_case : Optional[Any]=1 , _snake_case : Dict=0 , _snake_case : List[Any]=2 , _snake_case : Tuple="absolute" , _snake_case : Union[str, Any]=True , _snake_case : int=None , **_snake_case : List[Any] , ):
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
__lowercase : List[str] = vocab_size
__lowercase : Tuple = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : Optional[int] = hidden_act
__lowercase : Dict = intermediate_size
__lowercase : int = hidden_dropout_prob
__lowercase : Optional[int] = attention_probs_dropout_prob
__lowercase : Any = max_position_embeddings
__lowercase : str = type_vocab_size
__lowercase : int = initializer_range
__lowercase : List[str] = layer_norm_eps
__lowercase : List[Any] = position_embedding_type
__lowercase : Union[str, Any] = use_cache
__lowercase : List[Any] = classifier_dropout
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def snake_case_ ( self : Dict ):
if self.task == "multiple-choice":
__lowercase : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 156 |
from math import isqrt, loga
def UpperCAmelCase_ ( __lowerCAmelCase ) -> list[int]:
__lowercase : Optional[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __lowerCAmelCase , __lowerCAmelCase ):
__lowercase : Dict = False
return [i for i in range(2 , __lowerCAmelCase ) if is_prime[i]]
def UpperCAmelCase_ ( __lowerCAmelCase = 800_800 , __lowerCAmelCase = 800_800 ) -> int:
__lowercase : Tuple = degree * loga(__lowerCAmelCase )
__lowercase : List[str] = int(__lowerCAmelCase )
__lowercase : Optional[Any] = calculate_prime_numbers(__lowerCAmelCase )
__lowercase : Any = 0
__lowercase : int = 0
__lowercase : Tuple = len(__lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'{solution() = }')
| 156 | 1 |
'''simple docstring'''
class A_ :
def __init__( self : Optional[Any] ):
_UpperCAmelCase = ""
_UpperCAmelCase = ""
_UpperCAmelCase = []
def lowercase ( self : Optional[int] , snake_case_ : int , snake_case_ : int ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_UpperCAmelCase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_UpperCAmelCase = self.__min_dist_top_down_dp(snake_case_ , n - 1 )
_UpperCAmelCase = self.__min_dist_top_down_dp(m - 1 , snake_case_ )
_UpperCAmelCase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_UpperCAmelCase = 1 + min(snake_case_ , snake_case_ , snake_case_ )
return self.dp[m][n]
def lowercase ( self : Any , snake_case_ : str , snake_case_ : str ):
_UpperCAmelCase = worda
_UpperCAmelCase = worda
_UpperCAmelCase = [[-1 for _ in range(len(snake_case_ ) )] for _ in range(len(snake_case_ ) )]
return self.__min_dist_top_down_dp(len(snake_case_ ) - 1 , len(snake_case_ ) - 1 )
def lowercase ( self : List[str] , snake_case_ : str , snake_case_ : str ):
_UpperCAmelCase = worda
_UpperCAmelCase = worda
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_UpperCAmelCase = j
elif j == 0: # second string is empty
_UpperCAmelCase = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_UpperCAmelCase = self.dp[i - 1][j - 1]
else:
_UpperCAmelCase = self.dp[i][j - 1]
_UpperCAmelCase = self.dp[i - 1][j]
_UpperCAmelCase = self.dp[i - 1][j - 1]
_UpperCAmelCase = 1 + min(snake_case_ , snake_case_ , snake_case_ )
return self.dp[m][n]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :str = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
__SCREAMING_SNAKE_CASE :Any = input('''Enter the first string: ''').strip()
__SCREAMING_SNAKE_CASE :Dict = input('''Enter the second string: ''').strip()
print()
print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 359 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : int ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = [False] * len(__lowercase )
_UpperCAmelCase = []
queue.append(__lowercase )
_UpperCAmelCase = True
while queue:
_UpperCAmelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_UpperCAmelCase = True
_UpperCAmelCase = u
return visited[t]
def UpperCAmelCase_ ( __lowercase : int , __lowercase : List[Any] , __lowercase : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = [-1] * (len(__lowercase ))
_UpperCAmelCase = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_UpperCAmelCase = float("Inf" )
_UpperCAmelCase = sink
while s != source:
# Find the minimum value in select path
_UpperCAmelCase = min(__lowercase , graph[parent[s]][s] )
_UpperCAmelCase = parent[s]
max_flow += path_flow
_UpperCAmelCase = sink
while v != source:
_UpperCAmelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCAmelCase = parent[v]
return max_flow
__SCREAMING_SNAKE_CASE :Union[str, Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
| 156 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = """imagegpt"""
a__ : int = ["""past_key_values"""]
a__ : List[str] = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __lowercase=512 + 1 , __lowercase=32 * 32 , __lowercase=512 , __lowercase=24 , __lowercase=8 , __lowercase=None , __lowercase="quick_gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1E-5 , __lowercase=0.02 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , **__lowercase , ) -> Optional[Any]:
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :Any = n_positions
__UpperCamelCase :int = n_embd
__UpperCamelCase :Optional[int] = n_layer
__UpperCamelCase :Optional[int] = n_head
__UpperCamelCase :Optional[Any] = n_inner
__UpperCamelCase :str = activation_function
__UpperCamelCase :Tuple = resid_pdrop
__UpperCamelCase :Dict = embd_pdrop
__UpperCamelCase :Optional[Any] = attn_pdrop
__UpperCamelCase :Optional[Any] = layer_norm_epsilon
__UpperCamelCase :Tuple = initializer_range
__UpperCamelCase :Union[str, Any] = scale_attn_weights
__UpperCamelCase :Tuple = use_cache
__UpperCamelCase :Optional[int] = scale_attn_by_inverse_layer_idx
__UpperCamelCase :Tuple = reorder_and_upcast_attn
__UpperCamelCase :int = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowercase , **__lowercase)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
])
def UpperCamelCase__ ( self , __lowercase , __lowercase = 1 , __lowercase = -1 , __lowercase = False , __lowercase = None , __lowercase = 3 , __lowercase = 32 , __lowercase = 32 , ) -> Mapping[str, Any]:
__UpperCamelCase :Optional[int] = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :List[str] = dict(preprocessor(images=__lowercase , return_tensors=__lowercase))
return inputs
| 43 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCamelCase__ )
__lowerCamelCase = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__lowerCamelCase = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
__lowerCamelCase = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 348 | 0 |
def __lowercase ( _UpperCamelCase ) ->list:
"""simple docstring"""
for i in range(len(a__ ) - 1, 0, -1 ):
lowercase : Union[str, Any] = False
for j in range(a__, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase , lowercase : str = unsorted[j - 1], unsorted[j]
lowercase : Optional[int] = True
for j in range(a__ ):
if unsorted[j] > unsorted[j + 1]:
lowercase , lowercase : Any = unsorted[j + 1], unsorted[j]
lowercase : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = input('''Enter numbers separated by a comma:\n''').strip()
__a = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 337 |
def __lowercase ( a__ ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowerCAmelCase__ : Optional[Any] =int(input('''Enter number: ''').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 257 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : int = 1_6,lowercase_ : int = 8_8,lowercase_ : Optional[int] = None,lowercase_ : int = 1,lowercase_ : float = 0.0,lowercase_ : int = 3_2,lowercase_ : Optional[int] = None,lowercase_ : bool = False,lowercase_ : Optional[int] = None,lowercase_ : Optional[int] = None,lowercase_ : str = "geglu",lowercase_ : Optional[int] = None,)-> List[Any]:
'''simple docstring'''
super().__init__()
A__ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase_,attention_head_dim=lowercase_,in_channels=lowercase_,num_layers=lowercase_,dropout=lowercase_,norm_num_groups=lowercase_,cross_attention_dim=lowercase_,attention_bias=lowercase_,sample_size=lowercase_,num_vector_embeds=lowercase_,activation_fn=lowercase_,num_embeds_ada_norm=lowercase_,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A__ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A__ = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A__ = [1, 0]
def snake_case__ ( self : List[Any],lowercase_ : Optional[int],lowercase_ : List[Any],lowercase_ : Tuple=None,lowercase_ : Union[str, Any]=None,lowercase_ : List[str]=None,lowercase_ : bool = True,)-> Any:
'''simple docstring'''
A__ = hidden_states
A__ = []
A__ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A__ = self.transformer_index_for_condition[i]
A__ = self.transformers[transformer_index](
lowercase_,encoder_hidden_states=lowercase_,timestep=lowercase_,cross_attention_kwargs=lowercase_,return_dict=lowercase_,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A__ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase_ )
| 282 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class A ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] )-> List[str]:
'''simple docstring'''
super().__init__()
A__ = nn.Linear(3,4 )
A__ = nn.BatchNormad(4 )
A__ = nn.Linear(4,5 )
def snake_case__ ( self : Dict,lowercase_ : Union[str, Any] )-> Tuple:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,model.state_dict() )
A__ = os.path.join(lowercase_,'index.json' )
self.assertTrue(os.path.isfile(lowercase_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A__ = os.path.join(lowercase_,F'{key}.dat' )
self.assertTrue(os.path.isfile(lowercase_ ) )
# TODO: add tests on the fact weights are properly loaded
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A__ = torch.randn(2,3,dtype=lowercase_ )
with TemporaryDirectory() as tmp_dir:
A__ = offload_weight(lowercase_,'weight',lowercase_,{} )
A__ = os.path.join(lowercase_,'weight.dat' )
self.assertTrue(os.path.isfile(lowercase_ ) )
self.assertDictEqual(lowercase_,{'weight': {'shape': [2, 3], 'dtype': str(lowercase_ ).split('.' )[1]}} )
A__ = load_offloaded_weight(lowercase_,index['weight'] )
self.assertTrue(torch.equal(lowercase_,lowercase_ ) )
def snake_case__ ( self : Optional[int] )-> Optional[Any]:
'''simple docstring'''
A__ = ModelForTest()
A__ = model.state_dict()
A__ = {k: v for k, v in state_dict.items() if 'linear2' not in k}
A__ = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
A__ = {k: v for k, v in state_dict.items() if 'weight' in k}
A__ = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
# Duplicates are removed
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
A__ = {'a.1': 0, 'a.10': 1, 'a.2': 2}
A__ = extract_submodules_state_dict(lowercase_,['a.1', 'a.2'] )
self.assertDictEqual(lowercase_,{'a.1': 0, 'a.2': 2} )
A__ = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
A__ = extract_submodules_state_dict(lowercase_,['a.1', 'a.2'] )
self.assertDictEqual(lowercase_,{'a.1.a': 0, 'a.2.a': 2} )
| 282 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A__(a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : int = IFInpaintingPipeline
_A : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_A : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_A : Dict = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase__ ( self ) -> Optional[Any]:
return self._get_dummy_components()
def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Any:
if str(_lowercase ).startswith("""mps""" ):
a_ : List[str] = torch.manual_seed(_lowercase )
else:
a_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
a_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
a_ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase__ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCamelCase__ ( self ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCamelCase__ ( self ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCamelCase__ ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCamelCase__ ( self ) -> Dict:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 248 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Tuple = logging.getLogger()
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : int = argparse.ArgumentParser()
parser.add_argument("""-f""")
a_ : Any = parser.parse_args()
return args.f
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
a_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Dict:
a_ : List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[int] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_lowercase , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_lowercase )
a_ : Tuple = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
a_ : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
| 248 | 1 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowercase__ :List[str] = 637_8137.0
lowercase__ :List[str] = 635_6752.31_4245
lowercase__ :Any = 637_8137
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowercase = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
lowercase = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowercase = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowercase = (b_lata + b_lata) / 2
lowercase = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowercase = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
lowercase = cos(sigma / 2 ) ** 2
lowercase = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowercase = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
lowercase = sin(sigma / 2 ) ** 2
lowercase = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self ,A__ ,A__=7 ,A__=3 ,A__=3_0 ,A__=4_0_0 ,A__=True ,A__=None ,A__=True ,A__=[0.5, 0.5, 0.5] ,A__=[0.5, 0.5, 0.5] ,A__=True ,A__=1 / 2_5_5 ,A__=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_pad
def A__ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self ,A__ ,A__=False):
if not batched:
lowercase = image_inputs[0]
if isinstance(A__ ,Image.Image):
lowercase , lowercase = image.size
else:
lowercase , lowercase = image.shape[1], image.shape[2]
if w < h:
lowercase = int(self.size['''shortest_edge'''] * h / w)
lowercase = self.size['''shortest_edge''']
elif w > h:
lowercase = self.size['''shortest_edge''']
lowercase = int(self.size['''shortest_edge'''] * w / h)
else:
lowercase = self.size['''shortest_edge''']
lowercase = self.size['''shortest_edge''']
else:
lowercase = []
for image in image_inputs:
lowercase , lowercase = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
lowercase = max(A__ ,key=lambda A__: item[0])[0]
lowercase = max(A__ ,key=lambda A__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Dict =ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self):
lowercase = ConditionalDetrImageProcessingTester(self)
@property
def A__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self):
lowercase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A__ ,'''image_mean'''))
self.assertTrue(hasattr(A__ ,'''image_std'''))
self.assertTrue(hasattr(A__ ,'''do_normalize'''))
self.assertTrue(hasattr(A__ ,'''do_resize'''))
self.assertTrue(hasattr(A__ ,'''size'''))
def A__ ( self):
lowercase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3})
self.assertEqual(image_processor.do_pad ,A__)
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict ,size=4_2 ,max_size=8_4 ,pad_and_return_pixel_mask=A__)
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2, '''longest_edge''': 8_4})
self.assertEqual(image_processor.do_pad ,A__)
def A__ ( self):
pass
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__)
for image in image_inputs:
self.assertIsInstance(A__ ,Image.Image)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__ ,batched=A__)
lowercase = image_processing(A__ ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__ ,numpify=A__)
for image in image_inputs:
self.assertIsInstance(A__ ,np.ndarray)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowercase = image_processing(A__ ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__ ,batched=A__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__ ,torchify=A__)
for image in image_inputs:
self.assertIsInstance(A__ ,torch.Tensor)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowercase = image_processing(A__ ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__ ,batched=A__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def A__ ( self):
# prepare image and target
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' ,'''r''') as f:
lowercase = json.loads(f.read())
lowercase = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowercase = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''')
lowercase = image_processing(images=A__ ,annotations=A__ ,return_tensors='''pt''')
# verify pixel values
lowercase = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['''pixel_values'''].shape ,A__)
lowercase = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,A__ ,atol=1E-4))
# verify area
lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,A__))
# verify boxes
lowercase = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,A__)
lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,A__ ,atol=1E-3))
# verify image_id
lowercase = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,A__))
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,A__))
# verify class_labels
lowercase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,A__))
# verify orig_size
lowercase = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,A__))
# verify size
lowercase = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,A__))
@slow
def A__ ( self):
# prepare image, target and masks_path
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' ,'''r''') as f:
lowercase = json.loads(f.read())
lowercase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowercase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
lowercase = ConditionalDetrImageProcessor(format='''coco_panoptic''')
lowercase = image_processing(images=A__ ,annotations=A__ ,masks_path=A__ ,return_tensors='''pt''')
# verify pixel values
lowercase = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['''pixel_values'''].shape ,A__)
lowercase = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,A__ ,atol=1E-4))
# verify area
lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,A__))
# verify boxes
lowercase = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,A__)
lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,A__ ,atol=1E-3))
# verify image_id
lowercase = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,A__))
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,A__))
# verify class_labels
lowercase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,A__))
# verify masks
lowercase = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() ,A__)
# verify orig_size
lowercase = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,A__))
# verify size
lowercase = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,A__))
| 97 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'ViltImageProcessor'
lowercase = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , A=None , A=None , **A ) -> Dict:
UpperCAmelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A , )
UpperCAmelCase : Any = kwargs.pop("""feature_extractor""" )
UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A , A )
UpperCAmelCase : Union[str, Any] = self.image_processor
def __call__( self , A , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel_values + pixel_mask
UpperCAmelCase : int = self.image_processor(A , return_tensors=A )
encoding.update(A )
return encoding
def _lowercase( self , *A , **A ) -> Dict:
return self.tokenizer.batch_decode(*A , **A )
def _lowercase( self , *A , **A ) -> List[str]:
return self.tokenizer.decode(*A , **A )
@property
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase( self ) -> Dict:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A , )
return self.image_processor_class
@property
def _lowercase( self ) -> List[str]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A , )
return self.image_processor
| 265 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = AlbertTokenizer(A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 1 |
def snake_case ( snake_case__ :int , snake_case__ :list[int] , snake_case__ :int) -> int:
def count_of_possible_combinations(snake_case__ :int) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item) for item in array)
return count_of_possible_combinations(snake_case__)
def snake_case ( snake_case__ :int , snake_case__ :list[int] , snake_case__ :int) -> int:
def count_of_possible_combinations_with_dp_array(
snake_case__ :int , snake_case__ :list[int]) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_A = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__)
for item in array)
_A = answer
return answer
_A = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__)
def snake_case ( snake_case__ :int , snake_case__ :list[int] , snake_case__ :int) -> int:
_A = [0] * (target + 1)
_A = 1
for i in range(1 , target + 1):
for j in range(snake_case__):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 5
_SCREAMING_SNAKE_CASE = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 81 | def snake_case ( snake_case__ :str) -> int:
_A = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""")
_A = hex_num[0] == """-"""
if is_negative:
_A = hex_num[1:]
try:
_A = int(snake_case__ , 16)
except ValueError:
raise ValueError("""Invalid value was passed to the function""")
_A = """"""
while int_num > 0:
_A = str(int_num % 2) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81 | 1 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def UpperCAmelCase_ ( __lowerCamelCase : Any ):
lowercase_ :Any = prime_factors(_UpperCamelCase )
if is_square_free(_UpperCamelCase ):
return -1 if len(_UpperCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[int] = ['input_ids', 'attention_mask']
A : str = BlenderbotTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
lowercase : str = add_prefix_space
lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = add_prefix_space
lowercase : str = '''post_processor'''
lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowercase : Union[str, Any] = tuple(state['''cls'''] )
lowercase : Optional[int] = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Tuple = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
lowercase : List[str] = trim_offsets
lowercase : Optional[int] = True
if changes_to_apply:
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
lowercase : Any = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Tuple = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
lowercase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 337 | 0 |
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : int = 50 ):
"""simple docstring"""
__UpperCAmelCase : Any = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
| 16 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : Dict = num_channels
__UpperCAmelCase : List[Any] = embed_dim
__UpperCAmelCase : str = depths
__UpperCAmelCase : Dict = num_heads
__UpperCAmelCase : str = window_size
__UpperCAmelCase : int = mlp_ratio
__UpperCAmelCase : Union[str, Any] = qkv_bias
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = drop_path_rate
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Optional[int] = use_absolute_embeddings
__UpperCAmelCase : Any = patch_norm
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : Optional[int] = type_sequence_label_size
__UpperCAmelCase : int = encoder_stride
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase )
__UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = self.type_sequence_label_size
__UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs
__UpperCAmelCase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE : List[str] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Dict = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[str] = SwinvaModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 )
def __A ( self ) -> Any:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(__UpperCAmelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : str = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : int = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCAmelCase : str = outputs.attentions
__UpperCAmelCase : Any = len(self.model_tester.depths )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : int = config.window_size**2
__UpperCAmelCase : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCAmelCase : Dict = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase : Dict = len(__UpperCAmelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : Any = True
__UpperCAmelCase : Any = True
__UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase : Optional[int] = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase ) )
__UpperCAmelCase : Tuple = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCAmelCase : List[Any] = outputs.hidden_states
__UpperCAmelCase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# Swinv2 has a different seq_length
__UpperCAmelCase : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : int = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = reshaped_hidden_states[0].shape
__UpperCAmelCase : Any = (
reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = 3
__UpperCAmelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase : int = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Tuple = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Dict = SwinvaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
@cached_property
def __A ( self ) -> int:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__UpperCAmelCase )
__UpperCAmelCase : Tuple = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase : Any = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**__UpperCAmelCase )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 16 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : str = TypeVar('''KEY''')
__lowerCamelCase : Optional[Any] = TypeVar('''VAL''')
@dataclass(frozen=_lowercase , slots=_lowercase )
class a__ ( Generic[KEY, VAL] ):
A = 42
A = 42
class a__ ( _Item ):
def __init__( self : List[str] ):
"""simple docstring"""
super().__init__(UpperCamelCase__,UpperCamelCase__ )
def __bool__( self : Optional[Any] ):
"""simple docstring"""
return False
__lowerCamelCase : List[str] = _DeletedItem()
class a__ ( MutableMapping[KEY, VAL] ):
def __init__( self : Any,_A : int = 8,_A : float = 0.75 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = initial_block_size
SCREAMING_SNAKE_CASE_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = capacity_factor
SCREAMING_SNAKE_CASE_ : List[Any] = 0
def __UpperCamelCase ( self : Union[str, Any],_A : KEY ):
"""simple docstring"""
return hash(UpperCamelCase__ ) % len(self._buckets )
def __UpperCamelCase ( self : List[str],_A : int ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def __UpperCamelCase ( self : List[Any],_A : int,_A : KEY,_A : VAL ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self._buckets[ind]
if not stored:
SCREAMING_SNAKE_CASE_ : Optional[int] = _Item(UpperCamelCase__,UpperCamelCase__ )
self._len += 1
return True
elif stored.key == key:
SCREAMING_SNAKE_CASE_ : Tuple = _Item(UpperCamelCase__,UpperCamelCase__ )
return True
else:
return False
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCamelCase__ )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __UpperCamelCase ( self : Union[str, Any],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self._buckets
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [None] * new_size
SCREAMING_SNAKE_CASE_ : List[Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key,item.val )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def __UpperCamelCase ( self : Any,_A : KEY ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self._get_bucket_index(UpperCamelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
SCREAMING_SNAKE_CASE_ : int = self._get_next_ind(UpperCamelCase__ )
def __UpperCamelCase ( self : Tuple,_A : KEY,_A : VAL ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase__ ):
if self._try_set(UpperCamelCase__,UpperCamelCase__,UpperCamelCase__ ):
break
def __setitem__( self : Optional[Any],_A : KEY,_A : VAL ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(UpperCamelCase__,UpperCamelCase__ )
def __delitem__( self : Tuple,_A : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self._buckets[ind]
if item is None:
raise KeyError(UpperCamelCase__ )
if item is _deleted:
continue
if item.key == key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Dict,_A : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCamelCase__ )
def __len__( self : str ):
"""simple docstring"""
return self._len
def __iter__( self : Union[str, Any] ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = """ ,""".join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
| 18 |
'''simple docstring'''
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> float:
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase ) )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> float:
lowerCamelCase__ : str = 0.0
for coeff in reversed(UpperCamelCase ):
lowerCamelCase__ : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
_A : Any =(0.0, 0.0, 5.0, 9.3, 7.0)
_A : Optional[Any] =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 41 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if gpta_config_file == "":
__lowerCAmelCase = GPTaConfig()
else:
__lowerCAmelCase = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = GPTaModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
__lowerCAmelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__lowerCAmelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
UpperCamelCase__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 352 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 102 | 0 |
__UpperCAmelCase = "Alexander Joslin"
import operator as op
from .stack import Stack
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
SCREAMING_SNAKE_CASE_ = Stack()
SCREAMING_SNAKE_CASE_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
SCREAMING_SNAKE_CASE_ = operator_stack.peek()
operator_stack.pop()
SCREAMING_SNAKE_CASE_ = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE_ = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE_ = operators[opr](__lowerCAmelCase, __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__UpperCAmelCase = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 299 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__lowerCAmelCase : Optional[Any] = {"UserAgent": UserAgent().random}
def UpperCAmelCase_ ( __lowerCAmelCase ) -> dict:
__lowercase : Optional[Any] = script.contents[0]
__lowercase : int = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Optional[int] ):
__lowercase : Dict = F'https://www.instagram.com/{username}/'
__lowercase : Tuple = self.get_json()
def snake_case_ ( self : Tuple ):
__lowercase : List[Any] = requests.get(self.url , headers=_snake_case ).text
__lowercase : str = BeautifulSoup(_snake_case , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Optional[Any] ):
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self : Optional[int] ):
return F'{self.fullname} ({self.username}) is {self.biography}'
@property
def snake_case_ ( self : Dict ):
return self.user_data["username"]
@property
def snake_case_ ( self : List[Any] ):
return self.user_data["full_name"]
@property
def snake_case_ ( self : Optional[Any] ):
return self.user_data["biography"]
@property
def snake_case_ ( self : Any ):
return self.user_data["business_email"]
@property
def snake_case_ ( self : int ):
return self.user_data["external_url"]
@property
def snake_case_ ( self : Union[str, Any] ):
return self.user_data["edge_followed_by"]["count"]
@property
def snake_case_ ( self : Dict ):
return self.user_data["edge_follow"]["count"]
@property
def snake_case_ ( self : Any ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def snake_case_ ( self : int ):
return self.user_data["profile_pic_url_hd"]
@property
def snake_case_ ( self : Optional[Any] ):
return self.user_data["is_verified"]
@property
def snake_case_ ( self : Optional[Any] ):
return self.user_data["is_private"]
def UpperCAmelCase_ ( __lowerCAmelCase = "github" ) -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
__lowercase : Dict = InstagramUser(__lowerCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __lowerCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 156 | 0 |
"""simple docstring"""
def _A ( UpperCamelCase_ : int = 10**9) -> int:
'''simple docstring'''
__lowercase = 1
__lowercase = 2
__lowercase = 0
__lowercase = 0
__lowercase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowercase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"{solution() = }")
| 365 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _A ( ) -> Dict:
'''simple docstring'''
__lowercase = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
__lowercase = Dataset.from_dict(UpperCamelCase_)
return dataset
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def _lowercase ( self : int ):
__lowercase = get_dataset()
__lowercase = make_duplicate_clusters(UpperCAmelCase__, 0.85 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def _lowercase ( self : Any ):
__lowercase = get_dataset()
__lowercase ,__lowercase = deduplicate_dataset(UpperCAmelCase__ )
self.assertEqual(len(UpperCAmelCase__ ), 2 )
print(UpperCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"], 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"], UpperCAmelCase__ )
| 144 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
lowercase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
with open(snake_case , '''r''' ) as file:
for line_number, line in enumerate(snake_case ):
__SCREAMING_SNAKE_CASE : List[Any] = line.strip()
if line:
__SCREAMING_SNAKE_CASE : Any = line.split()
__SCREAMING_SNAKE_CASE : Optional[Any] = line_number
__SCREAMING_SNAKE_CASE : Dict = words[0]
__SCREAMING_SNAKE_CASE : List[Any] = value
return result
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
for attribute in key.split('''.''' ):
__SCREAMING_SNAKE_CASE : Tuple = getattr(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case ):
__SCREAMING_SNAKE_CASE : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__SCREAMING_SNAKE_CASE : List[Any] = '''param'''
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE : int = getattr(snake_case , snake_case ).shape
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE : Tuple = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = getattr(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : str = shape_pointer.shape
# let's reduce dimension
__SCREAMING_SNAKE_CASE : Union[str, Any] = value[0]
else:
__SCREAMING_SNAKE_CASE : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__SCREAMING_SNAKE_CASE : int = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__SCREAMING_SNAKE_CASE : Tuple = getattr(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
__SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case ):
__SCREAMING_SNAKE_CASE : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''param'''
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE : List[Any] = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE : Any = '''.'''.join([key, hf_param_name] )
else:
__SCREAMING_SNAKE_CASE : List[Any] = key
__SCREAMING_SNAKE_CASE : str = value if '''lm_head''' in full_key else value[0]
lowercase_ = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def a__ ( snake_case , snake_case , snake_case=None , snake_case=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = False
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE : Optional[Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.split(snake_case )[0].split('''.''' )[-2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = mapped_key.replace('''*''' , snake_case )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''weight_g'''
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''weight_v'''
elif "bias" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__SCREAMING_SNAKE_CASE : List[Any] = '''weight'''
else:
__SCREAMING_SNAKE_CASE : Dict = None
if hf_dict is not None:
rename_dict(snake_case , snake_case , snake_case , snake_case , snake_case )
else:
set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case )
return is_used
return is_used
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : str = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
__SCREAMING_SNAKE_CASE : List[Any] = load_wavaveca_layer(snake_case , snake_case , snake_case )
if not is_used:
unused_weights.append(snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = full_name.split('''conv_layers.''' )[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = name.split('''.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = int(items[0] )
__SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE : List[str] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case )
@torch.no_grad()
def a__ ( snake_case , snake_case , snake_case=None , snake_case=None , snake_case=True , snake_case=False ):
"""simple docstring"""
if config_path is not None:
__SCREAMING_SNAKE_CASE : Tuple = WavaVecaConfig.from_pretrained(snake_case )
else:
__SCREAMING_SNAKE_CASE : Dict = WavaVecaConfig()
if is_seq_class:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_txt_into_dict(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = idalabel
__SCREAMING_SNAKE_CASE : Union[str, Any] = WavaVecaForSequenceClassification(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case , return_attention_mask=snake_case , )
feature_extractor.save_pretrained(snake_case )
elif is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE : Tuple = Dictionary.load(snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE : Optional[int] = target_dict.pad_index
__SCREAMING_SNAKE_CASE : str = target_dict.bos_index
__SCREAMING_SNAKE_CASE : Tuple = target_dict.eos_index
__SCREAMING_SNAKE_CASE : Any = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(snake_case , '''vocab.json''' )
if not os.path.isdir(snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case ) )
return
os.makedirs(snake_case , exist_ok=snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : Dict = 1
with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : int = WavaVecaCTCTokenizer(
snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case , )
__SCREAMING_SNAKE_CASE : Any = True if config.feat_extract_norm == '''layer''' else False
__SCREAMING_SNAKE_CASE : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case , return_attention_mask=snake_case , )
__SCREAMING_SNAKE_CASE : Any = WavaVecaProcessor(feature_extractor=snake_case , tokenizer=snake_case )
processor.save_pretrained(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = WavaVecaForCTC(snake_case )
else:
__SCREAMING_SNAKE_CASE : List[str] = WavaVecaForPreTraining(snake_case )
if is_finetuned or is_seq_class:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE : Tuple = argparse.Namespace(task='''audio_pretraining''' )
__SCREAMING_SNAKE_CASE : int = fairseq.tasks.setup_task(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model[0].eval()
recursively_load_weights(snake_case , snake_case , not is_finetuned )
hf_wavavec.save_pretrained(snake_case )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
lowercase_ = parser.parse_args()
lowercase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 303 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=36 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1000 , ) -> List[Any]:
snake_case_ : Optional[int] = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : List[str] = is_training
snake_case_ : Union[str, Any] = use_input_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : Dict = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : Optional[Any] = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : int = coordinate_size
snake_case_ : int = shape_size
snake_case_ : Any = num_labels
snake_case_ : List[Any] = num_choices
snake_case_ : Optional[Any] = scope
snake_case_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ : List[str] = text_seq_length
snake_case_ : Dict = (image_size // patch_size) ** 2 + 1
snake_case_ : Dict = self.text_seq_length + self.image_seq_length
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
snake_case_ : Tuple = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ : List[Any] = bbox[i, j, 3]
snake_case_ : Union[str, Any] = bbox[i, j, 1]
snake_case_ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ : Tuple = bbox[i, j, 2]
snake_case_ : int = bbox[i, j, 0]
snake_case_ : Dict = tmp_coordinate
snake_case_ : Tuple = tf.constant(_SCREAMING_SNAKE_CASE )
snake_case_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[Any] = None
if self.use_input_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ : Dict = None
if self.use_token_type_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ : Any = None
snake_case_ : Any = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ : int = TFLayoutLMvaModel(config=_SCREAMING_SNAKE_CASE )
# text + image
snake_case_ : List[Any] = model(_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
snake_case_ : Any = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ : Optional[int] = model({"pixel_values": pixel_values} , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ : str = self.num_labels
snake_case_ : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Any = TFLayoutLMvaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
snake_case_ : int = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ : int = 2
snake_case_ : str = TFLayoutLMvaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = model(
_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Optional[int] = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) : Optional[int] = config_and_inputs
snake_case_ : List[str] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A : int = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A : Optional[Any] = False
A : Tuple = False
A : int = False
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return True
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> dict:
snake_case_ : Optional[int] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
snake_case_ : Tuple = {
k: tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
snake_case_ : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
snake_case_ : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
snake_case_ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
snake_case_ : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : Dict = TFLayoutLMvaModelTester(self )
snake_case_ : Union[str, Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
if getattr(_SCREAMING_SNAKE_CASE , "hf_compute_loss" , _SCREAMING_SNAKE_CASE ):
# The number of elements in the loss should be the same as the number of elements in the label
snake_case_ : List[str] = self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_SCREAMING_SNAKE_CASE )[0]
]
snake_case_ : int = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
snake_case_ : Any = self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = prepared_for_class.pop("input_ids" )
snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
snake_case_ : int = self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
snake_case_ : str = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
snake_case_ : Any = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
snake_case_ : Union[str, Any] = -100
snake_case_ : List[str] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
snake_case_ : str = self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = model(_SCREAMING_SNAKE_CASE )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
snake_case_ : List[Any] = self._prepare_for_class(inputs_dict.copy() , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
# Get keys that were added with the _prepare_for_class function
snake_case_ : Dict = prepared_for_class.keys() - inputs_dict.keys()
snake_case_ : Dict = inspect.signature(model.call ).parameters
snake_case_ : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
snake_case_ : Optional[Any] = {0: "input_ids"}
for label_key in label_keys:
snake_case_ : Optional[Any] = signature_names.index(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = label_key
snake_case_ : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
snake_case_ : List[str] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
snake_case_ : Tuple = prepared_for_class[value]
snake_case_ : Tuple = tuple(_SCREAMING_SNAKE_CASE )
# Send to model
snake_case_ : int = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _lowerCAmelCase ( self ) -> str:
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Dict:
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : List[Any] = type
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Any:
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[Any]:
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ):
snake_case_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> str:
return LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
snake_case_ : int = self.default_image_processor
snake_case_ : List[Any] = prepare_img()
snake_case_ : int = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="tf" ).pixel_values
snake_case_ : Optional[Any] = tf.constant([[1, 2]] )
snake_case_ : Union[str, Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
snake_case_ : str = model(input_ids=_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
# verify the logits
snake_case_ : List[str] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : Dict = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 36 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
snake_case_ : List[Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : List[Any] = "sgugger/tiny-distilbert-classification"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
snake_case_ : int = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case_ : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = "patrickvonplaten/t5-tiny-random"
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) ).exists() )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : int = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "sequential" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "cumulative" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "current" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) ).exists() )
| 36 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = inspect.getfile(accelerate.test_utils )
_snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_snake_case = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def A ( self : Optional[Any] ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Dict ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Optional[int] ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : Any = Accelerator()
_lowerCamelCase : Optional[int] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[int] = ''''''
_lowerCamelCase : List[str] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : int = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 | 1 |
def __UpperCamelCase ( _UpperCAmelCase = 600851475143 ):
try:
__UpperCAmelCase : List[str] = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__UpperCAmelCase : int = 2
__UpperCAmelCase : Optional[Any] = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__UpperCAmelCase : int = i
while n % i == 0:
__UpperCAmelCase : Dict = n // i
i += 1
return int(lowercase__ )
if __name__ == "__main__":
print(f"{solution() = }")
| 362 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ : Dict = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = 0
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Any = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCAmelCase : int = os.path.join(UpperCAmelCase_ , "fake-roberta" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register("custom" , UpperCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register("model" , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register("bert" , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCAmelCase : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , "bert-base is not a local folder and is not a valid model identifier" ):
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision="aaaaaa" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
__UpperCAmelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''new-model'''
try:
AutoConfig.register("new-model" , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 37 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __snake_case ( lowerCAmelCase ):
_a : List[str]= "openai/whisper-base"
_a : Union[str, Any]= (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
_a : List[Any]= "transcriber"
_a : Optional[Any]= WhisperProcessor
_a : List[Any]= WhisperForConditionalGeneration
_a : int= ["audio"]
_a : Tuple= ["text"]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.pre_processor(snake_case ,return_tensors="""pt""" ).input_features
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.model.generate(inputs=snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case ,skip_special_tokens=snake_case )[0]
| 20 | """simple docstring"""
UpperCAmelCase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
UpperCAmelCase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = from_type.lower().strip("""s""" )
_UpperCAmelCase = to_type.lower().strip("""s""" )
_UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase )
_UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase )
if from_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(lowercase )}'''
)
raise ValueError(lowercase )
if to_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(lowercase )}'''
)
raise ValueError(lowercase )
_UpperCAmelCase = METRIC_CONVERSION[from_sanitized]
_UpperCAmelCase = METRIC_CONVERSION[to_sanitized]
_UpperCAmelCase = 1
if from_exponent > to_exponent:
_UpperCAmelCase = from_exponent - to_exponent
else:
_UpperCAmelCase = -(to_exponent - from_exponent)
return value * pow(10 ,lowercase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : List[str] = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class A ( __snake_case ):
lowercase_ = "longformer"
def __init__( self : Optional[int] , lowerCAmelCase_ : Union[List[int], int] = 5_12 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 3_05_22 , lowerCAmelCase_ : int = 7_68 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 30_72 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1e-12 , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : int , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
_a = attention_window
_a = sep_token_id
_a = bos_token_id
_a = eos_token_id
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = onnx_export
class A ( __snake_case ):
def __init__( self : Dict , lowerCAmelCase_ : "PretrainedConfig" , lowerCAmelCase_ : str = "default" , lowerCAmelCase_ : "List[PatchingSpec]" = None ) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_a = True
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_a = super().outputs
if self.task == "default":
_a = {0: """batch"""}
return outputs
@property
def __lowerCAmelCase ( self : List[str] ) -> float:
"""simple docstring"""
return 1e-4
@property
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : "PreTrainedTokenizerBase" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_a = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_a = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
_a = 1
return inputs
| 365 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[str] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
_snake_case : int = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
_snake_case : int = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class A ( _a ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = BertTokenizer
def __init__( self : str , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Tuple="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : Optional[int]="[MASK]" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**lowerCAmelCase_ )
_a = do_lower_case
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int=None ) -> List[str]:
"""simple docstring"""
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 179 | 0 |
"""simple docstring"""
def _A ( lowercase = 1_00 ):
"""simple docstring"""
a =(n * (n + 1) // 2) ** 2
a =n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }') | 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase_ : List[str] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = args.log_outputs
UpperCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
UpperCAmelCase_ = load_metric('''wer''' )
UpperCAmelCase_ = load_metric('''cer''' )
# compute metrics
UpperCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
UpperCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
UpperCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
print(__UpperCAmelCase )
with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
f.write(__UpperCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase_ = f"log_{dataset_id}_predictions.txt"
UpperCAmelCase_ = f"log_{dataset_id}_targets.txt"
with open(__UpperCAmelCase , '''w''' ) as p, open(__UpperCAmelCase , '''w''' ) as t:
# mapping function to write output
def write_to_file(__UpperCAmelCase , __UpperCAmelCase ):
p.write(f"{i}" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"{i}" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(__UpperCAmelCase , with_indices=__UpperCAmelCase )
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase_ = re.sub(__UpperCAmelCase , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
UpperCAmelCase_ = ''' '''.join(text.split(__UpperCAmelCase ) )
return text
def A ( __UpperCAmelCase ) -> int:
'''simple docstring'''
UpperCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCAmelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase_ = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=__UpperCAmelCase ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase_ = 0 if torch.cuda.is_available() else -1
UpperCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__UpperCAmelCase ):
UpperCAmelCase_ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase_ = prediction['''text''']
UpperCAmelCase_ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
UpperCAmelCase_ = dataset.map(__UpperCAmelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCamelCase_ = parser.parse_args()
main(args)
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase = 50 ) -> int:
lowercase__ : int = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 16 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Tuple =(IPNDMScheduler,)
UpperCamelCase__ : List[str] =(("""num_inference_steps""", 5_0),)
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] ={'num_train_timesteps': 1000}
config.update(**lowerCamelCase__ )
return config
def __lowercase ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Dict =dict(self.forward_default_kwargs )
__UpperCamelCase : Union[str, Any] =kwargs.pop('num_inference_steps' , lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =self.dummy_sample
__UpperCamelCase : Dict =0.1 * sample
__UpperCamelCase : List[str] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase : int =self.get_scheduler_config(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] =scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__UpperCamelCase : Dict =dummy_past_residuals[:]
if time_step is None:
__UpperCamelCase : Union[str, Any] =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__UpperCamelCase : str =scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__UpperCamelCase : int =dummy_past_residuals[:]
__UpperCamelCase : int =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : str =new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCamelCase : List[str] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : Optional[int] =new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowercase ( self ):
"""simple docstring"""
pass
def __lowercase ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =dict(self.forward_default_kwargs )
__UpperCamelCase : List[str] =kwargs.pop('num_inference_steps' , lowerCamelCase__ )
__UpperCamelCase : int =self.dummy_sample
__UpperCamelCase : int =0.1 * sample
__UpperCamelCase : Union[str, Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase : List[str] =self.get_scheduler_config()
__UpperCamelCase : List[Any] =scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase : Dict =dummy_past_residuals[:]
if time_step is None:
__UpperCamelCase : Dict =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__UpperCamelCase : Dict =scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase : Optional[int] =dummy_past_residuals[:]
__UpperCamelCase : Optional[int] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : Optional[int] =new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCamelCase : Any =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : Optional[Any] =new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowercase ( self , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.scheduler_classes[0]
__UpperCamelCase : Tuple =self.get_scheduler_config(**lowerCamelCase__ )
__UpperCamelCase : List[str] =scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : List[str] =10
__UpperCamelCase : List[Any] =self.dummy_model()
__UpperCamelCase : Any =self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase : Union[str, Any] =model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[int] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase : Dict =model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[str] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =dict(self.forward_default_kwargs )
__UpperCamelCase : int =kwargs.pop('num_inference_steps' , lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
__UpperCamelCase : Tuple =self.get_scheduler_config()
__UpperCamelCase : str =scheduler_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =self.dummy_sample
__UpperCamelCase : Any =0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , 'set_timesteps' ):
__UpperCamelCase : Any =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCamelCase : Optional[Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__UpperCamelCase : Any =dummy_past_residuals[:]
__UpperCamelCase : Optional[int] =scheduler.timesteps[5]
__UpperCamelCase : int =scheduler.timesteps[6]
__UpperCamelCase : Optional[Any] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : List[str] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCamelCase : Union[str, Any] =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__UpperCamelCase : Tuple =scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowercase ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ , time_step=lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =self.full_loop()
__UpperCamelCase : Dict =torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 245 |
import itertools
import math
def A ( a_ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(a_ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A ( ) -> Tuple:
__UpperCamelCase : Optional[Any] =2
while True:
if is_prime(a_ ):
yield num
num += 1
def A ( a_ = 10_001 ) -> int:
return next(itertools.islice(prime_generator() ,nth - 1 ,a_ ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 245 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_pad
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False ) -> str:
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(lowercase , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size["shortest_edge"] * h / w )
lowerCamelCase_ = self.size["shortest_edge"]
elif w > h:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = self.size["shortest_edge"]
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(lowercase , key=lambda lowercase : item[0] )[0]
lowerCamelCase_ = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "do_rescale" ) )
self.assertTrue(hasattr(lowercase , "do_pad" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 39769, "annotations": target}
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor()
lowerCamelCase_ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DeformableDetrImageProcessor(format="coco_panoptic" )
lowerCamelCase_ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
| 19 |
import numpy as np
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (0, 0)
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : int = 0
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[Any] = 0
def __eq__( self : Optional[int] , a_ : List[Any] ):
'''simple docstring'''
return self.position == cell.position
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
print(self.position )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : str , a_ : Dict=(5, 5) ):
'''simple docstring'''
__UpperCAmelCase : str = np.zeros(a_ )
__UpperCAmelCase : List[Any] = world_size[0]
__UpperCAmelCase : List[str] = world_size[1]
def snake_case__ ( self : Tuple ):
'''simple docstring'''
print(self.w )
def snake_case__ ( self : Union[str, Any] , a_ : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__UpperCAmelCase : str = cell.position[0]
__UpperCAmelCase : List[Any] = cell.position[1]
__UpperCAmelCase : str = []
for n in neughbour_cord:
__UpperCAmelCase : Optional[Any] = current_x + n[0]
__UpperCAmelCase : Any = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__UpperCAmelCase : int = Cell()
__UpperCAmelCase : List[str] = (x, y)
__UpperCAmelCase : List[Any] = cell
neighbours.append(a_ )
return neighbours
def a ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[Any] = []
_open.append(_UpperCAmelCase )
while _open:
__UpperCAmelCase : str = np.argmin([n.f for n in _open] )
__UpperCAmelCase : Optional[Any] = _open[min_f]
_closed.append(_open.pop(_UpperCAmelCase ) )
if current == goal:
break
for n in world.get_neigbours(_UpperCAmelCase ):
for c in _closed:
if c == n:
continue
__UpperCAmelCase : Optional[Any] = current.g + 1
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = n.position
__UpperCAmelCase , __UpperCAmelCase : Tuple = goal.position
__UpperCAmelCase : Union[str, Any] = (ya - ya) ** 2 + (xa - xa) ** 2
__UpperCAmelCase : Optional[Any] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_UpperCAmelCase )
__UpperCAmelCase : str = []
while current.parent is not None:
path.append(current.position )
__UpperCAmelCase : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__A =Gridworld()
# Start position and goal
__A =Cell()
__A =(0, 0)
__A =Cell()
__A =(4, 4)
print(f'''path from {start.position} to {goal.position}''')
__A =astar(world, start, goal)
# Just for visual reasons.
for i in s:
__A =1
print(world.w)
| 226 | 0 |
"""simple docstring"""
__UpperCamelCase = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
__UpperCamelCase = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
snake_case_ = (
f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
f'Valid values are: {", ".join(UpperCAmelCase )}'
)
raise ValueError(UpperCAmelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def UpperCamelCase_ ( _UpperCAmelCase : int ) -> typing.Counter[int]:
"""simple docstring"""
_UpperCAmelCase : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_UpperCAmelCase , max_perimeter + 1 ):
_UpperCAmelCase : Tuple = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_UpperCAmelCase ):
_UpperCAmelCase : Optional[int] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def UpperCamelCase_ ( _UpperCAmelCase : int = 1_000 ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = pythagorean_triple(_UpperCAmelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
| 31 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( a_ ):
UpperCAmelCase_ : List[Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : str ="AutoImageProcessor"
UpperCAmelCase_ : Any ="AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=None ):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase = self.tokenajson(_lowerCamelCase , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
| 220 | 0 |
import os
from collections.abc import Iterator
def lowerCAmelCase_ ( __lowerCamelCase = "." ):
for dir_path, dir_names, filenames in os.walk(__lowerCamelCase ):
__snake_case : Optional[int] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCamelCase , __lowerCamelCase ).lstrip("./" )
def lowerCAmelCase_ ( __lowerCamelCase ):
return F'{i * " "}*' if i else "\n##"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCamelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(__lowerCamelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCAmelCase_ ( __lowerCamelCase = "." ):
__snake_case : List[str] = ""
for filepath in sorted(good_file_paths(__lowerCamelCase ) ):
__snake_case : List[Any] = os.path.split(__lowerCamelCase )
if filepath != old_path:
__snake_case : int = print_path(__lowerCamelCase , __lowerCamelCase )
__snake_case : Union[str, Any] = (filepath.count(os.sep ) + 1) if filepath else 0
__snake_case : str = F'{filepath}/{filename}'.replace(" " , "%20" )
__snake_case : List[Any] = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F'{md_prefix(__lowerCamelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(".")
| 360 |
import logging
from transformers.configuration_utils import PretrainedConfig
_snake_case : Optional[Any] = logging.getLogger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "masked_bert"
def __init__( self : Optional[int] , lowerCamelCase : Any=30522 , lowerCamelCase : Tuple=768 , lowerCamelCase : str=12 , lowerCamelCase : Dict=12 , lowerCamelCase : List[str]=3072 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : Any=0.1 , lowerCamelCase : Any=0.1 , lowerCamelCase : List[Any]=512 , lowerCamelCase : int=2 , lowerCamelCase : str=0.02 , lowerCamelCase : List[str]=1E-12 , lowerCamelCase : Any=0 , lowerCamelCase : Dict="topK" , lowerCamelCase : List[Any]="constant" , lowerCamelCase : Dict=0.0 , **lowerCamelCase : List[Any] , ) -> Dict:
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
__snake_case : Optional[Any] = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : str = intermediate_size
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : List[str] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : Any = initializer_range
__snake_case : str = layer_norm_eps
__snake_case : str = pruning_method
__snake_case : int = mask_init
__snake_case : Any = mask_scale
| 134 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'Salesforce/blip-image-captioning-base'
lowerCamelCase__ = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
lowerCamelCase__ = 'image_captioner'
lowerCamelCase__ = AutoModelForVisionaSeq
lowerCamelCase__ = ['image']
lowerCamelCase__ = ['text']
def __init__( self, *__a, **__a):
'''simple docstring'''
requires_backends(self, ["vision"])
super().__init__(*__a, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.pre_processor(images=__a, return_tensors="pt")
def snake_case__ ( self, __a):
'''simple docstring'''
return self.model.generate(**__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.pre_processor.batch_decode(__a, skip_special_tokens=__a)[0].strip()
| 36 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , )
class UpperCAmelCase_ ( a):
lowerCamelCase__ = RobertaConfig
lowerCamelCase__ = 'roberta'
def __init__( self, __a):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a)
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , )
class UpperCAmelCase_ ( a):
lowerCamelCase__ = RobertaConfig
lowerCamelCase__ = 'roberta'
def __init__( self, __a):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Optional[int] = config.num_labels
_lowerCAmelCase : Optional[int] = config.num_hidden_layers
_lowerCAmelCase : Optional[int] = DeeRobertaModel(__a)
_lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob)
_lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels)
@add_start_docstrings_to_model_forward(__a)
def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.num_layers
try:
_lowerCAmelCase : List[Any] = self.roberta(
__a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, )
_lowerCAmelCase : List[Any] = outputs[1]
_lowerCAmelCase : Dict = self.dropout(__a)
_lowerCAmelCase : Dict = self.classifier(__a)
_lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : Union[str, Any] = e.exit_layer
_lowerCAmelCase : List[Any] = outputs[0]
if not self.training:
_lowerCAmelCase : int = entropy(__a)
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : str = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Optional[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1))
else:
_lowerCAmelCase : Optional[Any] = CrossEntropyLoss()
_lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
_lowerCAmelCase : Optional[int] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(__a)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[str] = MSELoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
_lowerCAmelCase : Dict = CrossEntropyLoss()
_lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(__a)
if train_highway:
_lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 36 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Tuple = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
_lowerCamelCase : int = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class lowerCamelCase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
UpperCAmelCase_ = TaTokenizer
UpperCAmelCase_ = []
def __init__( self : int, _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : str=None, _UpperCAmelCase : List[Any]="</s>", _UpperCAmelCase : Optional[Any]="<unk>", _UpperCAmelCase : str="<pad>", _UpperCAmelCase : Union[str, Any]=1_0_0, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Optional[int], ) -> Optional[Any]:
"""simple docstring"""
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE__ : str = [F'''<extra_id_{i}>''' for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(set(filter(lambda _UpperCAmelCase : bool("extra_id_" in str(__UpperCAmelCase ) ), __UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
__UpperCAmelCase, tokenizer_file=__UpperCAmelCase, eos_token=__UpperCAmelCase, unk_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, extra_ids=__UpperCAmelCase, additional_special_tokens=__UpperCAmelCase, **__UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Any = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : Tuple = extra_ids
@staticmethod
def A_ ( _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE__ : Optional[int] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.", __UpperCAmelCase, )
return max_model_length
def A_ ( self : Tuple, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Any = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
__UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file, __UpperCAmelCase )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def A_ ( self : Union[str, Any], _UpperCAmelCase : List[str], _UpperCAmelCase : List[str] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def A_ ( self : Dict, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Dict = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return list(
set(filter(lambda _UpperCAmelCase : bool(re.search(r"<extra_id_\d+>", __UpperCAmelCase ) ) is not None, self.additional_special_tokens ) ) )
def A_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return [self.convert_tokens_to_ids(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
| 366 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : complex , SCREAMING_SNAKE_CASE__ : str = "x" , SCREAMING_SNAKE_CASE__ : float = 10**-10 , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> complex:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = symbols(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = lambdify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = lambdify(SCREAMING_SNAKE_CASE__ , diff(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Any = starting_point
while True:
if diff_function(SCREAMING_SNAKE_CASE__ ) != 0:
SCREAMING_SNAKE_CASE__ : Any = prev_guess - multiplicity * func(SCREAMING_SNAKE_CASE__ ) / diff_function(
SCREAMING_SNAKE_CASE__ )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE__ : Optional[int] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191 | 0 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def __magic_name__ ( ) -> None:
'''simple docstring'''
assert nand_gate(0, 0 ) == 1
assert nand_gate(0, 1 ) == 1
assert nand_gate(1, 0 ) == 1
assert nand_gate(1, 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 56 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase )
if number < 1:
raise ValueError("""Input must be a positive integer""" )
return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 211 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
lowerCAmelCase__ : int = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase__ : List[Any] = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=_a , output_all_encodings=_a , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , _a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase__ : Union[str, Any] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase__ : Optional[Any] = os.path.join(get_home_dir() , '''models''' )
lowerCAmelCase__ : Optional[int] = _load_vocab(_a , _a , _a , cls=_a )
lowerCAmelCase__ : Any = nlp.model.BERTModel(
_a , len(_a ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=_a , use_token_type_embed=_a , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=_a , use_decoder=_a , )
original_bort.load_parameters(_a , cast_dtype=_a , ignore_extra=_a )
lowerCAmelCase__ : Tuple = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase__ : int = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(_a ),
}
lowerCAmelCase__ : str = BertConfig.from_dict(_a )
lowerCAmelCase__ : Optional[Any] = BertForMaskedLM(_a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_a ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_a , _a ):
lowerCAmelCase__ : Dict = hf_param.shape
lowerCAmelCase__ : List[str] = to_torch(params[gluon_param] )
lowerCAmelCase__ : Any = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
lowerCAmelCase__ : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
lowerCAmelCase__ : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
lowerCAmelCase__ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
lowerCAmelCase__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase__ : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase__ : BertSelfAttention = layer.attention.self
lowerCAmelCase__ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
lowerCAmelCase__ : str = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
lowerCAmelCase__ : int = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
lowerCAmelCase__ : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
lowerCAmelCase__ : Any = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
lowerCAmelCase__ : Any = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
lowerCAmelCase__ : BertSelfOutput = layer.attention.output
lowerCAmelCase__ : Dict = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
lowerCAmelCase__ : Optional[int] = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
lowerCAmelCase__ : int = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
lowerCAmelCase__ : List[str] = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
lowerCAmelCase__ : BertIntermediate = layer.intermediate
lowerCAmelCase__ : Union[str, Any] = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
lowerCAmelCase__ : Union[str, Any] = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
lowerCAmelCase__ : BertOutput = layer.output
lowerCAmelCase__ : Optional[int] = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
lowerCAmelCase__ : int = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
lowerCAmelCase__ : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
lowerCAmelCase__ : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
lowerCAmelCase__ : List[str] = tokenizer.encode_plus(_a )['''input_ids''']
# Get gluon output
lowerCAmelCase__ : str = mx.nd.array([input_ids] )
lowerCAmelCase__ : List[str] = original_bort(inputs=_a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_a )
lowerCAmelCase__ : Optional[int] = BertModel.from_pretrained(_a )
hf_bort_model.eval()
lowerCAmelCase__ : Tuple = tokenizer.encode_plus(_a , return_tensors='''pt''' )
lowerCAmelCase__ : Optional[Any] = hf_bort_model(**_a )[0]
lowerCAmelCase__ : str = output_gluon[0].asnumpy()
lowerCAmelCase__ : Optional[Any] = output_hf[0].detach().numpy()
lowerCAmelCase__ : str = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase__ : int = np.allclose(_a , _a , atol=1e-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , _a )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 211 | 1 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_UpperCAmelCase = get_logger(__name__)
class a ( enum.Enum ):
UpperCamelCase : List[Any] = 'all_checks'
UpperCamelCase : Tuple = 'basic_checks'
UpperCamelCase : str = 'no_checks'
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
def __magic_name__ ( lowercase , lowercase , lowercase=None ):
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase ) - set(lowercase ) ) )
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase ) - set(lowercase ) ) )
SCREAMING_SNAKE_CASE_: int =[url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE_: int =""" for """ + verification_name if verification_name is not None else """"""
if len(lowercase ) > 0:
raise NonMatchingChecksumError(
f'''Checksums didn\'t match{for_verification_name}:\n'''
f'''{bad_urls}\n'''
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
class a ( UpperCAmelCase__ ):
pass
def __magic_name__ ( lowercase , lowercase ):
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase ) - set(lowercase ) ) )
if len(set(lowercase ) - set(lowercase ) ) > 0:
raise UnexpectedSplits(str(set(lowercase ) - set(lowercase ) ) )
SCREAMING_SNAKE_CASE_: str =[
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase ) )
logger.info("""All the splits matched successfully.""" )
def __magic_name__ ( lowercase , lowercase = True ):
if record_checksum:
SCREAMING_SNAKE_CASE_: Optional[Any] =shaaaa()
with open(lowercase , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"""""" ):
m.update(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =m.hexdigest()
else:
SCREAMING_SNAKE_CASE_: Any =None
return {"num_bytes": os.path.getsize(lowercase ), "checksum": checksum}
def __magic_name__ ( lowercase ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 173 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Dict = 'unispeech'
def __init__( self : int , lowerCAmelCase : Dict=32 , lowerCAmelCase : Any=768 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Tuple=3072 , lowerCAmelCase : int="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : List[Any]=1E-5 , lowerCAmelCase : List[str]="group" , lowerCAmelCase : int="gelu" , lowerCAmelCase : Dict=(512, 512, 512, 512, 512, 512, 512) , lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase : Dict=False , lowerCAmelCase : Optional[int]=128 , lowerCAmelCase : Optional[Any]=16 , lowerCAmelCase : str=False , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=0.0_5 , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : List[str]=10 , lowerCAmelCase : str=0 , lowerCAmelCase : str=320 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : Dict=256 , lowerCAmelCase : str=256 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Dict="mean" , lowerCAmelCase : Dict=False , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Any=256 , lowerCAmelCase : List[str]=80 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : int=1 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Union[str, Any]=0.5 , **lowerCAmelCase : List[str] , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =hidden_size
SCREAMING_SNAKE_CASE_: Dict =feat_extract_norm
SCREAMING_SNAKE_CASE_: Tuple =feat_extract_activation
SCREAMING_SNAKE_CASE_: Tuple =list(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =list(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =list(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =conv_bias
SCREAMING_SNAKE_CASE_: Optional[int] =num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_: Dict =num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_: List[str] =len(self.conv_dim )
SCREAMING_SNAKE_CASE_: List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =intermediate_size
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_act
SCREAMING_SNAKE_CASE_: Optional[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: Dict =hidden_dropout
SCREAMING_SNAKE_CASE_: List[Any] =attention_dropout
SCREAMING_SNAKE_CASE_: List[Any] =activation_dropout
SCREAMING_SNAKE_CASE_: Dict =feat_proj_dropout
SCREAMING_SNAKE_CASE_: Dict =final_dropout
SCREAMING_SNAKE_CASE_: Tuple =layerdrop
SCREAMING_SNAKE_CASE_: Tuple =layer_norm_eps
SCREAMING_SNAKE_CASE_: str =initializer_range
SCREAMING_SNAKE_CASE_: Optional[Any] =num_ctc_classes
SCREAMING_SNAKE_CASE_: Any =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =do_stable_layer_norm
SCREAMING_SNAKE_CASE_: List[str] =use_weighted_layer_sum
SCREAMING_SNAKE_CASE_: str =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_: Optional[int] =apply_spec_augment
SCREAMING_SNAKE_CASE_: str =mask_time_prob
SCREAMING_SNAKE_CASE_: List[Any] =mask_time_length
SCREAMING_SNAKE_CASE_: Any =mask_time_min_masks
SCREAMING_SNAKE_CASE_: Union[str, Any] =mask_feature_prob
SCREAMING_SNAKE_CASE_: Dict =mask_feature_length
SCREAMING_SNAKE_CASE_: Optional[Any] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_: List[str] =num_codevectors_per_group
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_codevector_groups
SCREAMING_SNAKE_CASE_: List[str] =contrastive_logits_temperature
SCREAMING_SNAKE_CASE_: str =feat_quantizer_dropout
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_negatives
SCREAMING_SNAKE_CASE_: Optional[Any] =codevector_dim
SCREAMING_SNAKE_CASE_: str =proj_codevector_dim
SCREAMING_SNAKE_CASE_: Tuple =diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_: str =ctc_loss_reduction
SCREAMING_SNAKE_CASE_: List[str] =ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE_: Optional[int] =replace_prob
@property
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 173 | 1 |
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str]=28_123 ):
__a : Union[str, Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
__a : List[Any] = set()
__a : str = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(_SCREAMING_SNAKE_CASE )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 294 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None ):
_lowerCamelCase : Any = start
_lowerCamelCase : Optional[Any] = end
_lowerCamelCase : Optional[int] = val
_lowerCamelCase : List[str] = (start + end) // 2
_lowerCamelCase : Any = left
_lowerCamelCase : Tuple = right
def __repr__( self ):
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : List[Any] = collection
_lowerCamelCase : Tuple = function
if self.collection:
_lowerCamelCase : str = self._build_tree(0 , len(lowercase ) - 1 )
def A_ ( self , lowercase , lowercase ):
self._update_tree(self.root , lowercase , lowercase )
def A_ ( self , lowercase , lowercase ):
return self._query_range(self.root , lowercase , lowercase )
def A_ ( self , lowercase , lowercase ):
if start == end:
return SegmentTreeNode(lowercase , lowercase , self.collection[start] )
_lowerCamelCase : int = (start + end) // 2
_lowerCamelCase : Any = self._build_tree(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = self._build_tree(mid + 1 , lowercase )
return SegmentTreeNode(lowercase , lowercase , self.fn(left.val , right.val ) , lowercase , lowercase )
def A_ ( self , lowercase , lowercase , lowercase ):
if node.start == i and node.end == i:
_lowerCamelCase : Any = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase , lowercase )
else:
self._update_tree(node.right , lowercase , lowercase )
_lowerCamelCase : List[str] = self.fn(node.left.val , node.right.val )
def A_ ( self , lowercase , lowercase , lowercase ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase , lowercase )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase ) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase , lowercase )
def A_ ( self ):
if self.root is not None:
_lowerCamelCase : List[str] = Queue()
queue.put(self.root )
while not queue.empty():
_lowerCamelCase : List[str] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
lowercase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 96 |
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == num
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = n
while left <= right:
_lowerCamelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : str = mid - 1
else:
_lowerCamelCase : Optional[int] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ):
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
return list(tensor.shape )
__a : Tuple = tf.shape(_SCREAMING_SNAKE_CASE )
if tensor.shape == tf.TensorShape(_SCREAMING_SNAKE_CASE ):
return dynamic
__a : List[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_SCREAMING_SNAKE_CASE )]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : tf.Tensor , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple=1e-5 , _SCREAMING_SNAKE_CASE : Dict=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
__a , __a : Any = tf.nn.moments(_SCREAMING_SNAKE_CASE , axes=[axis] , keepdims=_SCREAMING_SNAKE_CASE )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__a : str = [1] * inputs.shape.rank
__a : int = shape_list(_SCREAMING_SNAKE_CASE )[axis]
__a : int = tf.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : List[Any] = tf.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Compute layer normalization using the batch_normalization
# function.
__a : str = tf.nn.batch_normalization(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , variance_epsilon=_SCREAMING_SNAKE_CASE , )
return outputs
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any=0 , _SCREAMING_SNAKE_CASE : Union[str, Any]=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__a : Any = tf.shape(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__a : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : tf.Tensor ):
if not isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ):
__a : List[str] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__a : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__a : str = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__a : Optional[int] = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowerCamelCase (_SCREAMING_SNAKE_CASE : tf.Tensor , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str = "input_ids" ):
tf.debugging.assert_less(
_SCREAMING_SNAKE_CASE , tf.cast(_SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(_SCREAMING_SNAKE_CASE )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ):
__a : Tuple = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__a : str = [x for x in data if len(_SCREAMING_SNAKE_CASE ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
__a : Any = np.asarray(_SCREAMING_SNAKE_CASE )
__a : str = 1
__a : List[str] = np.array_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__a : List[Any] = np.array_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_SCREAMING_SNAKE_CASE ):
__a : int = chunk_data
else:
__a : Tuple = data
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict ):
if name in group.attrs:
__a : List[str] = [n.decode('utf8' ) if hasattr(_SCREAMING_SNAKE_CASE , 'decode' ) else n for n in group.attrs[name]]
else:
__a : str = []
__a : Optional[int] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(_SCREAMING_SNAKE_CASE , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
def _expand_single_ad_tensor(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if isinstance(_SCREAMING_SNAKE_CASE , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_SCREAMING_SNAKE_CASE , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _SCREAMING_SNAKE_CASE )
| 294 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Dict = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) ) -> List[str]:
_a : Union[str, Any] = tau * frequency / samplerate
_a : Optional[Any] = sin(lowerCamelCase__ )
_a : Dict = cos(lowerCamelCase__ )
_a : int = _sin / (2 * q_factor)
_a : Union[str, Any] = (1 - _cos) / 2
_a : List[Any] = 1 - _cos
_a : Optional[Any] = 1 + alpha
_a : Optional[int] = -2 * _cos
_a : Optional[int] = 1 - alpha
_a : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) ) -> List[Any]:
_a : Optional[int] = tau * frequency / samplerate
_a : int = sin(lowerCamelCase__ )
_a : List[Any] = cos(lowerCamelCase__ )
_a : str = _sin / (2 * q_factor)
_a : str = (1 + _cos) / 2
_a : Optional[int] = -1 - _cos
_a : Dict = 1 + alpha
_a : Tuple = -2 * _cos
_a : str = 1 - alpha
_a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) ) -> List[str]:
_a : List[Any] = tau * frequency / samplerate
_a : Any = sin(lowerCamelCase__ )
_a : Any = cos(lowerCamelCase__ )
_a : Union[str, Any] = _sin / (2 * q_factor)
_a : int = _sin / 2
_a : Dict = 0
_a : Tuple = -ba
_a : int = 1 + alpha
_a : str = -2 * _cos
_a : Union[str, Any] = 1 - alpha
_a : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) ) -> List[str]:
_a : List[str] = tau * frequency / samplerate
_a : Optional[Any] = sin(lowerCamelCase__ )
_a : List[str] = cos(lowerCamelCase__ )
_a : Any = _sin / (2 * q_factor)
_a : Tuple = 1 - alpha
_a : Tuple = -2 * _cos
_a : List[str] = 1 + alpha
_a : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) , ) -> Union[str, Any]:
_a : List[str] = tau * frequency / samplerate
_a : Optional[Any] = sin(lowerCamelCase__ )
_a : Dict = cos(lowerCamelCase__ )
_a : List[str] = _sin / (2 * q_factor)
_a : List[str] = 10 ** (gain_db / 40)
_a : Any = 1 + alpha * big_a
_a : int = -2 * _cos
_a : Dict = 1 - alpha * big_a
_a : Union[str, Any] = 1 + alpha / big_a
_a : Any = -2 * _cos
_a : Optional[Any] = 1 - alpha / big_a
_a : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) , ) -> List[Any]:
_a : Any = tau * frequency / samplerate
_a : List[str] = sin(lowerCamelCase__ )
_a : Tuple = cos(lowerCamelCase__ )
_a : Optional[int] = _sin / (2 * q_factor)
_a : str = 10 ** (gain_db / 40)
_a : List[str] = (big_a + 1) - (big_a - 1) * _cos
_a : List[Any] = (big_a + 1) + (big_a - 1) * _cos
_a : Any = (big_a - 1) - (big_a + 1) * _cos
_a : str = (big_a - 1) + (big_a + 1) * _cos
_a : Dict = 2 * sqrt(lowerCamelCase__ ) * alpha
_a : Optional[int] = big_a * (pmc + aaa)
_a : Any = 2 * big_a * mpc
_a : Union[str, Any] = big_a * (pmc - aaa)
_a : List[str] = ppmc + aaa
_a : int = -2 * pmpc
_a : int = ppmc - aaa
_a : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 / sqrt(2 ) , ) -> Union[str, Any]:
_a : Union[str, Any] = tau * frequency / samplerate
_a : str = sin(lowerCamelCase__ )
_a : List[str] = cos(lowerCamelCase__ )
_a : List[str] = _sin / (2 * q_factor)
_a : List[str] = 10 ** (gain_db / 40)
_a : str = (big_a + 1) - (big_a - 1) * _cos
_a : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos
_a : Any = (big_a - 1) - (big_a + 1) * _cos
_a : Any = (big_a - 1) + (big_a + 1) * _cos
_a : Union[str, Any] = 2 * sqrt(lowerCamelCase__ ) * alpha
_a : Tuple = big_a * (ppmc + aaa)
_a : int = -2 * big_a * pmpc
_a : Optional[Any] = big_a * (ppmc - aaa)
_a : Tuple = pmc + aaa
_a : int = 2 * mpc
_a : int = pmc - aaa
_a : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 89 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=3 , A=30 , A=4_00 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 2_55 , A=True , ) -> str:
'''simple docstring'''
lowerCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = num_channels
lowerCamelCase = min_resolution
lowerCamelCase = max_resolution
lowerCamelCase = do_resize
lowerCamelCase = size
lowerCamelCase = do_normalize
lowerCamelCase = image_mean
lowerCamelCase = image_std
lowerCamelCase = do_rescale
lowerCamelCase = rescale_factor
lowerCamelCase = do_pad
def __A ( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __A ( self , A , A=False ) -> List[Any]:
'''simple docstring'''
if not batched:
lowerCamelCase = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase , lowerCamelCase = image.size
else:
lowerCamelCase , lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase = int(self.size["""shortest_edge"""] * h / w )
lowerCamelCase = self.size["""shortest_edge"""]
elif w > h:
lowerCamelCase = self.size["""shortest_edge"""]
lowerCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCamelCase = self.size["""shortest_edge"""]
lowerCamelCase = self.size["""shortest_edge"""]
else:
lowerCamelCase = []
for image in image_inputs:
lowerCamelCase , lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase = max(A , key=lambda A : item[0] )[0]
lowerCamelCase = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = YolosImageProcessor if is_vision_available() else None
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = YolosImageProcessingTester(self )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """image_mean""" ) )
self.assertTrue(hasattr(A , """image_std""" ) )
self.assertTrue(hasattr(A , """do_normalize""" ) )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , A )
lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCamelCase = image_processing_a.pad(A , return_tensors="""pt""" )
lowerCamelCase = image_processing_a(A , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCamelCase = json.loads(f.read() )
lowerCamelCase = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
lowerCamelCase = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCamelCase = image_processing(images=A , annotations=A , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A )
lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) )
# verify boxes
lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A )
lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) )
# verify is_crowd
lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) )
# verify class_labels
lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) )
# verify orig_size
lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) )
# verify size
lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCamelCase = json.loads(f.read() )
lowerCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
lowerCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCamelCase = YolosImageProcessor(format="""coco_panoptic""" )
lowerCamelCase = image_processing(images=A , annotations=A , masks_path=A , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A )
lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) )
# verify boxes
lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A )
lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) )
# verify is_crowd
lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) )
# verify class_labels
lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) )
# verify masks
lowerCamelCase = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A )
# verify orig_size
lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) )
# verify size
lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
| 252 | 0 |
import numpy as np
class A :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Tuple=None ) -> Optional[Any]:
"""simple docstring"""
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
def a_ ( self : str , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None ) -> List[str]:
"""simple docstring"""
if red is not None:
A__ = red
if green is not None:
A__ = green
if blue is not None:
A__ = blue
if red_edge is not None:
A__ = red_edge
if nir is not None:
A__ = nir
return True
def a_ ( self : List[str] , __lowerCAmelCase : int="" , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Tuple=None ) -> Optional[int]:
"""simple docstring"""
self.set_matricies(red=lowercase__ , green=lowercase__ , blue=lowercase__ , red_edge=lowercase__ , nir=lowercase__ )
A__ = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def a_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def a_ ( self : int ) -> str:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def a_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def a_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def a_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def a_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def a_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def a_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def a_ ( self : Dict ) -> Dict:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def a_ ( self : Dict , __lowerCAmelCase : List[str]=0.0_8 , __lowerCAmelCase : Any=1.2_2 , __lowerCAmelCase : Dict=0.0_3 ) -> Tuple:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def a_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (self.nir / self.green) - 1
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def a_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def a_ ( self : Tuple ) -> int:
"""simple docstring"""
A__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def a_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.nir - self.green
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def a_ ( self : Any ) -> List[Any]:
"""simple docstring"""
A__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def a_ ( self : int , __lowerCAmelCase : Dict=0.1_6 ) -> List[str]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def a_ ( self : str , __lowerCAmelCase : List[str]=0.5 ) -> Dict:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def a_ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def a_ ( self : Dict , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None ) -> List[str]:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def a_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def a_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.nir / self.red
def a_ ( self : List[Any] ) -> str:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def a_ ( self : int ) -> str:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def a_ ( self : List[str] ) -> str:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def a_ ( self : str ) -> str:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def a_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def a_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
A__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
A__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def a_ ( self : Any ) -> str:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.nir / self.red
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def a_ ( self : List[Any] ) -> int:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 358 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A :
'''simple docstring'''
__lowerCamelCase : Optional[Any] = BlenderbotSmallConfig
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : List[Any] = '''gelu'''
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=99 , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=20 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : int=0 , ) -> Any:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def a_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = TFBlenderbotSmallModel(config=__lowerCAmelCase ).get_decoder()
A__ = inputs_dict["""input_ids"""]
A__ = input_ids[:1, :]
A__ = inputs_dict["""attention_mask"""][:1, :]
A__ = inputs_dict["""head_mask"""]
A__ = 1
# first forward pass
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def __lowerCamelCase ( __a :Dict , __a :Tuple , __a :List[Any] , __a :List[str]=None , __a :List[Any]=None , __a :Optional[Any]=None , __a :List[str]=None , __a :int=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase : List[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Tuple = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Dict = True
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Tuple = False
def a_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=__lowerCAmelCase )
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Any:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase )
@require_tokenizers
@require_tf
class A (unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
__lowerCamelCase : Optional[int] = '''facebook/blenderbot_small-90M'''
@cached_property
def a_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def a_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def a_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
A__ = self.tokenizer(self.src_text , return_tensors="""tf""" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCAmelCase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 276 | 0 |
__a :dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
__a :dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
A_ = (
f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
f'''Valid values are: {", ".join(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] ,3 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
__a :Any = TypeVar('T')
__a :Union[str, Any] = Union[List[T], Tuple[T, ...]]
__a :List[str] = Union[T, List[T], Dict[str, T]]
__a :Any = Union[str, bytes, os.PathLike] | 312 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a__ = '''src/transformers'''
a__ = '''docs/source/en/tasks'''
def __UpperCAmelCase ( __a : Optional[int] ,__a : str ,__a : str ) -> int:
"""simple docstring"""
with open(a__ ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Any = f.readlines()
# Find the start prompt.
_a : List[Any] = 0
while not lines[start_index].startswith(a__ ):
start_index += 1
start_index += 1
_a : str = start_index
while not lines[end_index].startswith(a__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a__ = direct_transformers_import(TRANSFORMERS_PATH)
a__ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a__ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __UpperCAmelCase ( __a : Dict ) -> Optional[int]:
"""simple docstring"""
_a : List[Any] = TASK_GUIDE_TO_MODELS[task_guide]
_a : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(a__ ,set() )
_a : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __UpperCAmelCase ( __a : int ,__a : int=False ) -> List[str]:
"""simple docstring"""
_a , _a , _a , _a : Union[str, Any] = _find_text_in_file(
filename=os.path.join(a__ ,a__ ) ,start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' ,end_prompt='''<!--End of the generated tip-->''' ,)
_a : str = get_model_list_for_task(a__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(a__ ,a__ ) ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 359 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 0 |
def __magic_name__ ( A : float, A : float ):
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = XLMRobertaTokenizer
SCREAMING_SNAKE_CASE_ : int = XLMRobertaTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
def __UpperCAmelCase ( self : int ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Any:
a = "<pad>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 10_02 )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = XLMRobertaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCamelCase , f.name )
a = XLMRobertaTokenizer(f.name , keep_accents=__lowerCamelCase )
a = pickle.dumps(__lowerCamelCase )
pickle.loads(__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> str:
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(__lowerCamelCase )
a = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__lowerCamelCase )
a = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
a = "Hello World!"
a = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> int:
a = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
a = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCamelCase , self.big_tokenizer.encode(__lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
# fmt: off
a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 107 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
UpperCAmelCase__ : int =HfArgumentParser(InitializationArguments)
UpperCAmelCase__ : str =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
UpperCAmelCase__ : List[Any] =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCAmelCase__ : Optional[int] ={
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
UpperCAmelCase__ : Optional[Any] =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
UpperCAmelCase__ : Dict =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 358 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase__ : Union[str, Any] =logging.getLogger(__name__)
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
return (preds == labels).mean()
@dataclass
class __A :
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __A :
__A = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
__A = field(metadata={"""help""": """Should contain the data files for the task."""} )
__A = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowercase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
lowerCamelCase =processors[data_args.task_name]()
lowerCamelCase =processor.get_labels()
lowerCamelCase =len(_UpperCAmelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCamelCase =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_UpperCAmelCase ) -> Dict:
lowerCamelCase =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_UpperCAmelCase , p.label_ids )}
# Data collator
lowerCamelCase =DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase =Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase =trainer.evaluate()
lowerCamelCase =os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(_UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , _UpperCAmelCase , _UpperCAmelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_UpperCAmelCase )
return results
def _lowercase ( _UpperCAmelCase ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 262 | 0 |
from __future__ import annotations
def UpperCamelCase ( _A ): # This function is recursive
"""simple docstring"""
__magic_name__ : str = len(_A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__magic_name__ : Dict = array[0]
__magic_name__ : Optional[Any] = False
__magic_name__ : Tuple = 1
__magic_name__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[Any] = [element for element in array[i:] if element >= array[i]]
__magic_name__ : Dict = longest_subsequence(_A )
if len(_A ) > len(_A ):
__magic_name__ : Tuple = temp_array
else:
i += 1
__magic_name__ : Any = [element for element in array[1:] if element >= pivot]
__magic_name__ : Dict = [pivot, *longest_subsequence(_A )]
if len(_A ) > len(_A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[Any] = MgpstrTokenizer
lowercase__ : int = False
lowercase__ : Any = {}
lowercase__ : Optional[int] = False
def __magic_name__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
__magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : List[str] = """tester"""
__magic_name__ : int = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Dict = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
__magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __magic_name__ ( self ) -> Tuple:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __magic_name__ ( self ) -> Optional[Any]:
pass
| 342 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Any = StableDiffusionDiffEditPipeline
snake_case__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
snake_case__ : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
snake_case__ : Any = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ : int = frozenset([])
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
_lowerCamelCase : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
_lowerCamelCase : int = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_zero=__lowerCAmelCase , )
torch.manual_seed(0 )
_lowerCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_lowerCamelCase : Tuple = CLIPTextModel(__lowerCAmelCase )
_lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any]=0 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = floats_tensor((1, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : str = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=0 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : List[str] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=0 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : Optional[int] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : str = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowerCamelCase : int = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
_lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Any = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : int = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = '''cpu'''
_lowerCamelCase : List[str] = self.get_dummy_components()
_lowerCamelCase : Tuple = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = self.get_dummy_mask_inputs(__lowerCAmelCase )
_lowerCamelCase : Dict = pipe.generate_mask(**__lowerCAmelCase )
_lowerCamelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
_lowerCamelCase : int = np.array([0] * 9 )
_lowerCamelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = '''cpu'''
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : Dict = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_lowerCamelCase : Tuple = pipe.invert(**__lowerCAmelCase ).images
_lowerCamelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCamelCase : str = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
_lowerCamelCase : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = '''cpu'''
_lowerCamelCase : Optional[int] = self.get_dummy_components()
_lowerCamelCase : List[Any] = {'''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''beta_schedule''': '''scaled_linear'''}
_lowerCamelCase : List[Any] = DPMSolverMultistepScheduler(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = DPMSolverMultistepInverseScheduler(**__lowerCAmelCase )
_lowerCamelCase : Dict = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.get_dummy_inversion_inputs(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = pipe.invert(**__lowerCAmelCase ).images
_lowerCamelCase : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
_lowerCamelCase : Dict = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
_lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
_lowerCamelCase : Dict = raw_image.convert('''RGB''' ).resize((7_6_8, 7_6_8) )
_lowerCamelCase : Dict = raw_image
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_lowerCamelCase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Any = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Any = '''a bowl of fruit'''
_lowerCamelCase : int = '''a bowl of pears'''
_lowerCamelCase : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase ).latents
_lowerCamelCase : str = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
_lowerCamelCase : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
_lowerCamelCase : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
_lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCamelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = '''a bowl of fruit'''
_lowerCamelCase : Union[str, Any] = '''a bowl of pears'''
_lowerCamelCase : int = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCAmelCase , target_prompt=__lowerCAmelCase , generator=__lowerCAmelCase , )
_lowerCamelCase : int = pipe.invert(
prompt=__lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCAmelCase , num_inference_steps=2_5 , ).latents
_lowerCamelCase : Dict = pipe(
prompt=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_latents=__lowerCAmelCase , generator=__lowerCAmelCase , negative_prompt=__lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='''numpy''' , ).images[0]
_lowerCamelCase : List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 175 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowerCAmelCase__ = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def snake_case_ ( A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_lowerCamelCase : Union[str, Any] = int(re.match(R'''.*layer_(\d*).*''', A_ )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
_lowerCamelCase : List[str] = re.search(R'''[^\d](\d+)$''', str(A_ ) )
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
_lowerCamelCase : Optional[Any] = int(bit_search.groups()[0] )
return bit_size // 8
def snake_case_ ( A_ : str, A_ : Any, A_ : int, A_ : List[str], A_ : Any ):
'''simple docstring'''
if bloom_config_file == "":
_lowerCamelCase : Dict = BloomConfig()
else:
_lowerCamelCase : Any = BloomConfig.from_json_file(A_ )
if shard_model:
_lowerCamelCase : Optional[int] = os.listdir(A_ )
_lowerCamelCase : List[str] = sorted(filter(lambda A_ : s.startswith('''layer''' ) and "model_00" in s, A_ ) )
_lowerCamelCase : str = {'''weight_map''': {}, '''metadata''': {}}
_lowerCamelCase : List[str] = 0
_lowerCamelCase : str = None
_lowerCamelCase : str = BloomConfig()
for j, file in enumerate(A_ ):
print('''Processing file: {}'''.format(A_ ) )
_lowerCamelCase : List[Any] = None
for i in range(A_ ):
# load all TP files
_lowerCamelCase : Any = file.replace('''model_00''', F'''model_0{i}''' )
_lowerCamelCase : Any = torch.load(os.path.join(A_, A_ ), map_location='''cpu''' )
# Rename keys in the transformers names
_lowerCamelCase : Optional[Any] = list(temp.keys() )
for key in keys:
_lowerCamelCase : List[Any] = temp.pop(A_ )
if tensors is None:
_lowerCamelCase : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_lowerCamelCase : List[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_lowerCamelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=A_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_lowerCamelCase : Optional[Any] = tensors[key] / pretraining_tp
torch.save(
A_, os.path.join(
A_, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ), str(len(A_ ) ).zfill(5 ) ), ), )
for key in tensors.keys():
_lowerCamelCase : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_lowerCamelCase : Tuple = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ), str(len(A_ ) ).zfill(5 ) )
_lowerCamelCase : List[Any] = BloomConfig()
_lowerCamelCase : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
_lowerCamelCase : Union[str, Any] = total_size
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A_, WEIGHTS_NAME + '''.index.json''' ), '''w''', encoding='''utf-8''' ) as f:
_lowerCamelCase : Any = json.dumps(A_, indent=2, sort_keys=A_ ) + '''\n'''
f.write(A_ )
else:
_lowerCamelCase : Tuple = BloomModel(A_ )
_lowerCamelCase : Optional[int] = os.listdir(A_ )
_lowerCamelCase : Union[str, Any] = sorted(filter(lambda A_ : s.startswith('''layer''' ) and "model_00" in s, A_ ) )
_lowerCamelCase : int = None
for i, file in enumerate(A_ ):
_lowerCamelCase : Optional[int] = None
for i in range(A_ ):
# load all TP files
_lowerCamelCase : str = file.replace('''model_00''', F'''model_0{i}''' )
_lowerCamelCase : List[Any] = torch.load(os.path.join(A_, A_ ), map_location='''cpu''' )
# Rename keys in the transformers names
_lowerCamelCase : List[Any] = list(temp.keys() )
for key in keys:
_lowerCamelCase : Dict = temp.pop(A_ )
if tensors is None:
_lowerCamelCase : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_lowerCamelCase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_lowerCamelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=A_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_lowerCamelCase : List[Any] = tensors[key] / pretraining_tp
_lowerCamelCase : List[str] = model.load_state_dict(A_, strict=A_ )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
_lowerCamelCase : Optional[Any] = set(other_keys.missing_keys )
else:
_lowerCamelCase : Union[str, Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(A_, exist_ok=A_ )
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_lowerCamelCase : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
_lowerCamelCase : Dict = model.to(config.torch_dtype )
torch.save(model.state_dict(), A_ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 175 | 1 |
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ):
return abs(snake_case_ ) if a == 0 else greatest_common_divisor(b % a ,snake_case_ )
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ):
while y: # --> when y=0 then loop will terminate and return x as final GCD.
__lowerCamelCase = y, x % y
return abs(snake_case_ )
def a__ ( ):
try:
__lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
__lowerCamelCase = int(nums[0] )
__lowerCamelCase = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(snake_case_ ,snake_case_ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(snake_case_ ,snake_case_ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 330 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
# setable values
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
@classmethod
def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase )
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCamelCase = 42
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_1 , __lowerCamelCase = 0.0_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "fixed_small" , __lowerCamelCase = True , __lowerCamelCase = "epsilon" , __lowerCamelCase = jnp.floataa , ):
'''simple docstring'''
__A : Tuple = dtype
def UpperCamelCase__( self , __lowerCamelCase = None ):
'''simple docstring'''
if common is None:
__A : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__A : Tuple = jnp.array(1.0 , dtype=self.dtype )
__A : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
return sample
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = () ):
'''simple docstring'''
__A : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__A : Optional[Any] = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ):
'''simple docstring'''
__A : int = state.common.alphas_cumprod[t]
__A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__A : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__A : List[Any] = jnp.clip(__lowerCamelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__A : Optional[Any] = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__A : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__A : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__A : Optional[Any] = variance
__A : Optional[Any] = state.common.betas[t]
__A : Any = (predicted_variance + 1) / 2
__A : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True , ):
'''simple docstring'''
__A : Optional[int] = timestep
if key is None:
__A : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__A , __A : Tuple = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 )
else:
__A : List[str] = None
# 1. compute alphas, betas
__A : Dict = state.common.alphas_cumprod[t]
__A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__A : Tuple = 1 - alpha_prod_t
__A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__A : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__A : Any = model_output
elif self.config.prediction_type == "v_prediction":
__A : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__A : str = jnp.clip(__lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__A : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__A : List[Any] = jax.random.split(__lowerCamelCase , num=1 )
__A : List[str] = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise
__A : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__A : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 179 | 0 |
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while a != 0:
_UpperCAmelCase , _UpperCAmelCase = b % a, a
return b
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
if gcd(_UpperCAmelCase , _UpperCAmelCase ) != 1:
_UpperCAmelCase = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1, 0, a
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 1, m
while va != 0:
_UpperCAmelCase = ua // va
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 290 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 1 |
"""simple docstring"""
_A : Union[str, Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def __magic_name__ ( __snake_case : int ) -> Union[str, Any]:
lowercase : Union[str, Any] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_A : List[str] = [None] * 10_00_00_00
_A : Optional[Any] = True
_A : List[Any] = False
def __magic_name__ ( __snake_case : int ) -> Tuple:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase : List[Any] = chain(next_number(__a ) )
lowercase : Union[str, Any] = number_chain
while number < 1000_0000:
lowercase : Optional[Any] = number_chain
number *= 10
return number_chain
def __magic_name__ ( __snake_case : int = 1000_0000 ) -> Union[str, Any]:
for i in range(1 , __a ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 202 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase = HUGGINGFACE_HUB_CACHE
__lowerCAmelCase = """config.json"""
__lowerCAmelCase = """diffusion_pytorch_model.bin"""
__lowerCAmelCase = """diffusion_flax_model.msgpack"""
__lowerCAmelCase = """model.onnx"""
__lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
__lowerCAmelCase = """weights.pb"""
__lowerCAmelCase = """https://huggingface.co"""
__lowerCAmelCase = default_cache_path
__lowerCAmelCase = """diffusers_modules"""
__lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
__lowerCAmelCase = ["""fp16""", """non-ema"""]
__lowerCAmelCase = """.self_attn"""
| 271 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
def __init__(self ) -> None:
UpperCamelCase = [2, 1, 2, -1]
UpperCamelCase = [1, 2, 3, 4]
def snake_case_ (self ) -> list[float]:
UpperCamelCase = len(self.first_signal )
UpperCamelCase = len(self.second_signal )
UpperCamelCase = max(__a , __a )
# create a zero matrix of max_length x max_length
UpperCamelCase = [[0] * max_length for i in range(__a )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__a ):
UpperCamelCase = deque(self.second_signal )
rotated_signal.rotate(__a )
for j, item in enumerate(__a ):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCamelCase = np.matmul(np.transpose(__a ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__a , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 244 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCAmelCase__ = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
lowerCAmelCase__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = ["input_ids", "attention_mask"]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=None , __a=None , __a=None , __a = None , __a=None , **__a , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , tokenizer_file=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase = 1
UpperCamelCase = len(self.sp_model )
UpperCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase = src_lang if src_lang is not None else "en_XX"
UpperCamelCase = self.lang_code_to_id[self._src_lang]
UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , __a ) -> Tuple:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ (self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_ (self , __a ) -> None:
UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
UpperCamelCase = [1] * len(self.prefix_tokens )
UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ (self , __a , __a , __a , __a , **__a ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCamelCase = src_lang
UpperCamelCase = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
UpperCamelCase = self.convert_tokens_to_ids(__a )
UpperCamelCase = tgt_lang_id
return inputs
def snake_case_ (self ) -> List[str]:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ (self , __a ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ (self , __a ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ (self , __a ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ (self , __a ) -> Optional[int]:
UpperCamelCase = "".join(__a ).replace(__a , " " ).strip()
return out_string
def snake_case_ (self , __a , __a = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def snake_case_ (self , __a , __a = "en_XX" , __a = None , __a = "ro_RO" , **__a , ) -> BatchEncoding:
UpperCamelCase = src_lang
UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def snake_case_ (self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ (self ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ (self , __a ) -> None:
UpperCamelCase = self.lang_code_to_id[src_lang]
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
def snake_case_ (self , __a ) -> None:
UpperCamelCase = self.lang_code_to_id[lang]
UpperCamelCase = []
UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
| 244 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
return embeds | 286 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
return embeds | 286 | 1 |
"""simple docstring"""
snake_case__ : int = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
snake_case__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Tuple = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _snake_case ( _snake_case : str ):
if set(_snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowerCAmelCase : Union[str, Any] = ''''''
for word in coded.split():
while len(_snake_case ) != 0:
decoded += decode_dict[word[:5]]
lowerCAmelCase : Optional[Any] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364 |
"""simple docstring"""
import numpy as np
from PIL import Image
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Dict = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
# compute the shape of the output matrix
lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 0
return updated_arr
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
# compute the shape of the output matrix
lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A_ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 |
'''simple docstring'''
from __future__ import annotations
import requests
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> dict:
_a : Any =F"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(_UpperCAmelCase ).json()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 10 ) -> list[dict]:
_a : Union[str, Any] ="""https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
_a : int =requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 10 ) -> str:
_a : Union[str, Any] =hackernews_top_stories(_UpperCAmelCase )
return "\n".join("""* [{title}]({url})""".format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 276 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : list ) ->list:
'''simple docstring'''
if len(_lowercase ) <= 1:
return [tuple(_lowercase )]
a : Any = []
def generate(_lowercase : int , _lowercase : list ):
a : Any = [0] * n
res.append(tuple(_lowercase ) )
a : List[str] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
a, a : Optional[int] = arr[i], arr[0]
else:
a, a : Union[str, Any] = arr[i], arr[c[i]]
res.append(tuple(_lowercase ) )
c[i] += 1
a : str = 0
else:
a : Optional[Any] = 0
i += 1
generate(len(_lowercase ) , _lowercase )
return res
if __name__ == "__main__":
a : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
a : Dict = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 79 |
"""simple docstring"""
a : Optional[int] = 8.31_4462 # Unit - J mol-1 K-1
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ = 10 ):
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or n < 0:
raise ValueError("Invalid input" )
__SCREAMING_SNAKE_CASE = 10**n
__SCREAMING_SNAKE_CASE = 2_8433 * (pow(2 , 783_0457 , lowerCAmelCase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(1_0) = }")
| 54 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE :List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[str] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 | 0 |
"""simple docstring"""
import sys
import turtle
def A_ ( A__ , A__ ) -> Optional[int]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def A_ ( A__ , A__ , A__ , A__ , ) -> Optional[int]:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(UpperCAmelCase_ , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , depth - 1 )
triangle(UpperCAmelCase_ , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , depth - 1 )
triangle(UpperCAmelCase_ , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , get_mid(UpperCAmelCase_ , UpperCAmelCase_ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
lowercase : str = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
lowercase : Optional[int] = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 353 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2) -> Any:
'''simple docstring'''
a__ : Tuple = bp_numa
a__ : Union[str, Any] = bp_numa
a__ : Optional[int] = bp_numa
a__ : Optional[int] = conva_get[:2]
a__ : Optional[Any] = conva_get[2]
a__ : Optional[int] = size_pa
a__ : Union[str, Any] = rate_w
a__ : Dict = rate_t
a__ : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Any = -2 * np.random.rand(self.conva[1]) + 1
a__ : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
a__ : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
def __lowercase ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(lowercase , 'wb') as f:
pickle.dump(lowercase , lowercase)
print(F'Model saved: {save_path}')
@classmethod
def __lowercase ( cls , lowercase) -> Any:
'''simple docstring'''
with open(lowercase , 'rb') as f:
a__ : Any = pickle.load(lowercase) # noqa: S301
a__ : Dict = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
a__ : Tuple = model_dic.get('size_pooling1')
a__ : Optional[int] = model_dic.get('num_bp1')
a__ : Tuple = model_dic.get('num_bp2')
a__ : Optional[Any] = model_dic.get('num_bp3')
a__ : Optional[Any] = model_dic.get('rate_weight')
a__ : int = model_dic.get('rate_thre')
# create model instance
a__ : Union[str, Any] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase)
# modify model parameter
a__ : str = model_dic.get('w_conv1')
a__ : Optional[int] = model_dic.get('wkj')
a__ : Tuple = model_dic.get('vji')
a__ : str = model_dic.get('thre_conv1')
a__ : List[str] = model_dic.get('thre_bp2')
a__ : Tuple = model_dic.get('thre_bp3')
return conv_ins
def __lowercase ( self , lowercase) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def __lowercase ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return round(lowercase , 3)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : Union[str, Any] = convs[0]
a__ : Tuple = convs[1]
a__ : Any = np.shape(lowercase)[0]
# get the data slice of original image data, data_focus
a__ : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase):
a__ : str = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase)
# calculate the feature map of every single kernel, and saved as list of matrix
a__ : str = []
a__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(lowercase):
a__ : Tuple = []
for i_focus in range(len(lowercase)):
a__ : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase))
a__ : Dict = np.asmatrix(lowercase).reshape(
lowercase , lowercase)
data_featuremap.append(lowercase)
# expanding the data slice to One dimenssion
a__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase))
a__ : Optional[int] = np.asarray(lowercase)
return focus_list, data_featuremap
def __lowercase ( self , lowercase , lowercase , lowercase="average_pool") -> str:
'''simple docstring'''
a__ : Any = len(featuremaps[0])
a__ : int = int(size_map / size_pooling)
a__ : Optional[Any] = []
for i_map in range(len(lowercase)):
a__ : Any = featuremaps[i_map]
a__ : Optional[int] = []
for i_focus in range(0 , lowercase , lowercase):
for j_focus in range(0 , lowercase , lowercase):
a__ : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase))
a__ : List[str] = np.asmatrix(lowercase).reshape(lowercase , lowercase)
featuremap_pooled.append(lowercase)
return featuremap_pooled
def __lowercase ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : Any = []
for i in range(len(lowercase)):
a__ : Tuple = np.shape(data[i])
a__ : List[str] = data[i].reshape(1 , shapes[0] * shapes[1])
a__ : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase)
a__ : Union[str, Any] = np.asarray(lowercase)
return data_expanded
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : Dict = np.asarray(lowercase)
a__ : Optional[int] = np.shape(lowercase)
a__ : Any = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__ : int = []
a__ : Optional[int] = 0
for i_map in range(lowercase):
a__ : Optional[Any] = np.ones((size_map, size_map))
for i in range(0 , lowercase , lowercase):
for j in range(0 , lowercase , lowercase):
a__ : Union[str, Any] = pd_pool[
i_pool
]
a__ : Tuple = i_pool + 1
a__ : Optional[int] = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(lowercase)
return pd_all
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=bool) -> str:
'''simple docstring'''
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(lowercase)))
print((' - - Shape: Teach_Data ', np.shape(lowercase)))
a__ : Dict = 0
a__ : List[Any] = []
a__ : Optional[int] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
a__ : Dict = 0
print(F'-------------Learning Time {rp}--------------')
for p in range(len(lowercase)):
# print('------------Learning Image: %d--------------'%p)
a__ : Dict = np.asmatrix(datas_train[p])
a__ : Any = np.asarray(datas_teach[p])
a__ , a__ : Optional[int] = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Dict = self.pooling(lowercase , self.size_poolinga)
a__ : Optional[Any] = np.shape(lowercase)
a__ : Union[str, Any] = self._expand(lowercase)
a__ : List[Any] = data_bp_input
a__ : Tuple = np.dot(lowercase , self.vji.T) - self.thre_bpa
a__ : Any = self.sig(lowercase)
a__ : Any = np.dot(lowercase , self.wkj.T) - self.thre_bpa
a__ : Any = self.sig(lowercase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a__ : Any = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa)))
a__ : Optional[Any] = np.multiply(
np.dot(lowercase , self.wkj) , np.multiply(lowercase , (1 - bp_outa)))
a__ : Tuple = np.dot(lowercase , self.vji)
a__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
a__ : List[str] = pd_conva_pooled.T.getA().tolist()
a__ : str = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
a__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv])
a__ : int = self.rate_weight * np.dot(lowercase , lowercase)
a__ : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
a__ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
a__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a__ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
a__ : Tuple = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a__ : List[str] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a__ : Any = rp + 1
a__ : Optional[Any] = error_count / patterns
all_mse.append(lowercase)
def draw_error():
a__ : int = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(lowercase , '+-')
plt.plot(lowercase , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(lowercase , alpha=0.5)
plt.show()
print('------------------Training Complished---------------------')
print((' - - Training epoch: ', rp, F' - - Mse: {mse:.6f}'))
if draw_e:
draw_error()
return mse
def __lowercase ( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : str = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(lowercase)))
for p in range(len(lowercase)):
a__ : int = np.asmatrix(datas_test[p])
a__ , a__ : Optional[int] = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : str = self.pooling(lowercase , self.size_poolinga)
a__ : Optional[int] = self._expand(lowercase)
a__ : str = data_bp_input
a__ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
a__ : Optional[Any] = self.sig(lowercase)
a__ : int = bp_outa * self.wkj.T - self.thre_bpa
a__ : Dict = self.sig(lowercase)
produce_out.extend(bp_outa.getA().tolist())
a__ : List[Any] = [list(map(self.do_round , lowercase)) for each in produce_out]
return np.asarray(lowercase)
def __lowercase ( self , lowercase) -> str:
'''simple docstring'''
a__ : str = np.asmatrix(lowercase)
a__ , a__ : str = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : List[str] = self.pooling(lowercase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 225 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( a ) -> str:
_A: str = VideoMAEConfig()
set_architecture_configs(a , a )
if "finetuned" not in model_name:
_A: Union[str, Any] = False
if "finetuned" in model_name:
_A: Union[str, Any] = '''huggingface/label-files'''
if "kinetics" in model_name:
_A: str = 4_00
_A: Tuple = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
_A: Union[str, Any] = 1_74
_A: Tuple = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
_A: Dict = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
_A: Tuple = {int(a ): v for k, v in idalabel.items()}
_A: Dict = idalabel
_A: List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( a , a ) -> Union[str, Any]:
if "small" in model_name:
_A: Dict = 3_84
_A: List[str] = 15_36
_A: Union[str, Any] = 12
_A: Any = 16
_A: str = 12
_A: int = 3
_A: Tuple = 1_92
_A: Union[str, Any] = 7_68
elif "large" in model_name:
_A: Dict = 10_24
_A: int = 40_96
_A: str = 24
_A: str = 16
_A: Any = 12
_A: str = 8
_A: List[Any] = 5_12
_A: Dict = 20_48
elif "huge" in model_name:
_A: Any = 12_80
_A: Optional[Any] = 51_20
_A: Optional[int] = 32
_A: Any = 16
_A: Union[str, Any] = 12
_A: List[Any] = 8
_A: Tuple = 6_40
_A: Optional[Any] = 25_60
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def lowerCamelCase__ ( a ) -> Union[str, Any]:
if "encoder." in name:
_A: str = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
_A: Optional[Any] = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
_A: int = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
_A: List[Any] = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
_A: List[str] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_A: Any = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
_A: Optional[Any] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
_A: Dict = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
_A: Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
_A: Dict = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
_A: Any = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
_A: Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_A: Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_A: Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_A: Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
_A: List[Any] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
_A: List[str] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
_A: Dict = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_A: Optional[Any] = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_A: List[str] = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
_A: Tuple = name.replace('''head''' , '''classifier''' )
return name
def lowerCamelCase__ ( a , a ) -> Any:
for key in orig_state_dict.copy().keys():
_A: Optional[int] = orig_state_dict.pop(a )
if key.startswith('''encoder.''' ):
_A: str = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
_A: List[str] = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
_A: List[Any] = config.decoder_hidden_size
_A: Any = int(key_split[2] )
_A: Union[str, Any] = '''decoder.decoder_layers.'''
if "weight" in key:
_A: List[Any] = val[:dim, :]
_A: Any = val[dim : dim * 2, :]
_A: Optional[int] = val[-dim:, :]
else:
_A: Union[str, Any] = config.hidden_size
_A: Optional[Any] = int(key_split[1] )
_A: str = '''videomae.encoder.layer.'''
if "weight" in key:
_A: Optional[Any] = val[:dim, :]
_A: Dict = val[dim : dim * 2, :]
_A: Dict = val[-dim:, :]
else:
_A: Optional[int] = val
return orig_state_dict
def lowerCamelCase__ ( ) -> Dict:
_A: List[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
_A: Optional[Any] = np.load(a )
return list(a )
def lowerCamelCase__ ( a , a , a , a ) -> Optional[Any]:
_A: str = get_videomae_config(a )
if "finetuned" in model_name:
_A: Dict = VideoMAEForVideoClassification(a )
else:
_A: Optional[Any] = VideoMAEForPreTraining(a )
# download original checkpoint, hosted on Google Drive
_A: List[Any] = '''pytorch_model.bin'''
gdown.cached_download(a , a , quiet=a )
_A: Any = torch.load(a , map_location='''cpu''' )
if "model" in files:
_A: Any = files['''model''']
else:
_A: int = files['''module''']
_A: Optional[Any] = convert_state_dict(a , a )
model.load_state_dict(a )
model.eval()
# verify model on basic input
_A: Optional[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_A: Optional[int] = prepare_video()
_A: List[Any] = image_processor(a , return_tensors='''pt''' )
if "finetuned" not in model_name:
_A: int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
_A: Optional[Any] = torch.load(a )
_A: Any = model(**a )
_A: Optional[Any] = outputs.logits
_A: Tuple = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_A: Optional[Any] = torch.Size([1, 4_00] )
_A: Dict = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_A: str = torch.Size([1, 1_74] )
_A: str = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_A: Optional[int] = torch.Size([1, 14_08, 15_36] )
_A: Tuple = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_A: List[Any] = torch.Size([1, 14_08, 15_36] )
_A: Any = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_A: str = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_A: int = torch.Size([1, 14_08, 15_36] )
_A: Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_A: Tuple = torch.Size([1, 4_00] )
_A: List[Any] = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_A: List[str] = torch.Size([1, 4_00] )
_A: List[Any] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_A: List[Any] = torch.Size([1, 4_00] )
_A: List[Any] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_A: int = torch.Size([1, 4_00] )
_A: int = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_A: Tuple = torch.Size([1, 14_08, 15_36] )
_A: List[Any] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_A: Tuple = torch.Size([1, 1_74] )
_A: int = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_A: str = torch.Size([1, 14_08, 15_36] )
_A: int = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_A: Optional[int] = torch.Size([1, 1_74] )
_A: Dict = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , a , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , a , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_A: int = outputs.loss
assert torch.allclose(a , a , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a )
model.save_pretrained(a )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(a , organization='''nielsr''' )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 121 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCamelCase__ ( a = True , *a , **a ) -> Optional[Any]:
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
_A: Optional[Any] = False
if main_process_only:
_A: Union[str, Any] = PartialState().local_process_index == 0
return _tqdm(*a , **a , disable=a )
| 121 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class snake_case_ (unittest.TestCase ):
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Any=7 ,__snake_case :Dict=3 ,__snake_case :str=18 ,__snake_case :int=30 ,__snake_case :Tuple=4_00 ,__snake_case :Optional[Any]=True ,__snake_case :Any=None ,__snake_case :int=True ,__snake_case :Tuple=None ,__snake_case :List[str]=True ,__snake_case :Any=[0.5, 0.5, 0.5] ,__snake_case :Union[str, Any]=[0.5, 0.5, 0.5] ,__snake_case :List[Any]=False ,) -> Tuple:
a__ = size if size is not None else {'height': 20, 'width': 20}
a__ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = image_size
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = size
a__ = do_center_crop
a__ = crop_size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = do_reduce_labels
def lowerCamelCase__( self :Any ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __lowercase ( ):
a__ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
a__ = Image.open(dataset[0]['file'] )
a__ = Image.open(dataset[1]['file'] )
return image, map
def __lowercase ( ):
a__ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
a__ = Image.open(ds[0]['file'] )
a__ = Image.open(ds[1]['file'] )
a__ = Image.open(ds[2]['file'] )
a__ = Image.open(ds[3]['file'] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
a__ = BeitImageProcessingTester(self )
@property
def lowerCamelCase__( self :Dict ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__( self :int ) -> List[str]:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case ,'do_resize' ) )
self.assertTrue(hasattr(__snake_case ,'size' ) )
self.assertTrue(hasattr(__snake_case ,'do_center_crop' ) )
self.assertTrue(hasattr(__snake_case ,'center_crop' ) )
self.assertTrue(hasattr(__snake_case ,'do_normalize' ) )
self.assertTrue(hasattr(__snake_case ,'image_mean' ) )
self.assertTrue(hasattr(__snake_case ,'image_std' ) )
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels ,__snake_case )
a__ = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=__snake_case )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
pass
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCamelCase__( self :Tuple ) -> List[Any]:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCamelCase__( self :List[Any] ) -> str:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCamelCase__( self :Dict ) -> List[str]:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,torchify=__snake_case )
a__ = []
for image in image_inputs:
self.assertIsInstance(__snake_case ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
a__ = image_processing(image_inputs[0] ,maps[0] ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_55 )
# Test batched
a__ = image_processing(__snake_case ,__snake_case ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_55 )
# Test not batched input (PIL images)
a__ , a__ = prepare_semantic_single_inputs()
a__ = image_processing(__snake_case ,__snake_case ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_55 )
# Test batched input (PIL images)
a__ , a__ = prepare_semantic_batch_inputs()
a__ = image_processing(__snake_case ,__snake_case ,return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(
encoding['labels'].shape ,(
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
self.assertEqual(encoding['labels'].dtype ,torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_55 )
def lowerCamelCase__( self :Tuple ) -> Tuple:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
a__ , a__ = prepare_semantic_single_inputs()
a__ = image_processing(__snake_case ,__snake_case ,return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 1_50 )
a__ = True
a__ = image_processing(__snake_case ,__snake_case ,return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_55 )
| 109 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case : str = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
a__ = b.T
a__ = np.sum(np.square(__lowerCAmelCase ) , axis=1 )
a__ = np.sum(np.square(__lowerCAmelCase ) , axis=0 )
a__ = np.matmul(__lowerCAmelCase , __lowerCAmelCase )
a__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ):
a__ = x.reshape(-1 , 3 )
a__ = squared_euclidean_distance(__lowerCAmelCase , __lowerCAmelCase )
return np.argmin(__lowerCAmelCase , axis=1 )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = ['''pixel_values''']
def __init__( self :Dict ,__snake_case :Optional[Union[List[List[int]], np.ndarray]] = None ,__snake_case :bool = True ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :bool = True ,__snake_case :bool = True ,**__snake_case :Optional[int] ,) -> None:
super().__init__(**__snake_case )
a__ = size if size is not None else {'height': 2_56, 'width': 2_56}
a__ = get_size_dict(__snake_case )
a__ = np.array(__snake_case ) if clusters is not None else None
a__ = do_resize
a__ = size
a__ = resample
a__ = do_normalize
a__ = do_color_quantize
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :Dict[str, int] ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :Any ,) -> np.ndarray:
a__ = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
__snake_case ,size=(size['height'], size['width']) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :np.ndarray ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,) -> np.ndarray:
a__ = rescale(image=__snake_case ,scale=1 / 1_27.5 ,data_format=__snake_case )
a__ = image - 1
return image
def lowerCamelCase__( self :Optional[Any] ,__snake_case :ImageInput ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = None ,__snake_case :bool = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[List[List[int]], np.ndarray]] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,__snake_case :Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST ,**__snake_case :List[str] ,) -> PIL.Image.Image:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = size if size is not None else self.size
a__ = get_size_dict(__snake_case )
a__ = resample if resample is not None else self.resample
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a__ = clusters if clusters is not None else self.clusters
a__ = np.array(__snake_case )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
a__ = [self.resize(image=__snake_case ,size=__snake_case ,resample=__snake_case ) for image in images]
if do_normalize:
a__ = [self.normalize(image=__snake_case ) for image in images]
if do_color_quantize:
a__ = [to_channel_dimension_format(__snake_case ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a__ = np.array(__snake_case )
a__ = color_quantize(__snake_case ,__snake_case ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
a__ = images.shape[0]
a__ = images.reshape(__snake_case ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
a__ = list(__snake_case )
else:
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'input_ids': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 109 | 1 |
import operator as op
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : List[str] = []
lowerCAmelCase : Dict = lambda _UpperCAmelCase, _UpperCAmelCase : int(x / y ) # noqa: E731 integer division operation
lowerCAmelCase : Union[str, Any] = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ), 'Action'.center(12 ), 'Stack', sep=' | ' )
print('-' * (30 + len(_UpperCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_UpperCAmelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ('push(' + x + ')').ljust(12 ), ','.join(_UpperCAmelCase ), sep=' | ' )
else:
lowerCAmelCase : str = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ), ('pop(' + b + ')').ljust(12 ), ','.join(_UpperCAmelCase ), sep=' | ' )
lowerCAmelCase : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ), ('pop(' + a + ')').ljust(12 ), ','.join(_UpperCAmelCase ), sep=' | ' )
stack.append(
str(opr[x](int(_UpperCAmelCase ), int(_UpperCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ('push(' + a + x + b + ')').ljust(12 ), ','.join(_UpperCAmelCase ), sep=' | ', )
return int(stack[0] )
if __name__ == "__main__":
__A : int = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 138 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=0 , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Optional[Any] = use_input_mask
lowerCAmelCase : Union[str, Any] = use_token_type_ids
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Dict = num_labels
lowerCAmelCase : List[Any] = num_choices
lowerCAmelCase : Optional[Any] = scope
lowerCAmelCase : Optional[Any] = projection_dim
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Any = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
lowerCAmelCase : Tuple = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str ):
lowerCAmelCase : int = TFDPRContextEncoder(config=UpperCAmelCase_ )
lowerCAmelCase : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[int] = TFDPRReader(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : List[Any] = config_and_inputs
lowerCAmelCase : str = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = TFDPRModelTester(self )
lowerCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : List[Any] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Any = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[int] = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
lowerCAmelCase : List[Any] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase : List[Any] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 138 | 1 |
'''simple docstring'''
import operator as op
A : List[str] = '''scaler.pt'''
A : Optional[int] = '''pytorch_model'''
A : Union[str, Any] = '''random_states'''
A : List[Any] = '''optimizer'''
A : Optional[Any] = '''scheduler'''
A : Optional[int] = '''pytorch_model.bin'''
A : List[str] = '''pytorch_model.bin.index.json'''
A : List[Any] = '''model.safetensors'''
A : int = '''model.safetensors.index.json'''
A : List[str] = '''1.10.2'''
A : str = '''py38'''
A : int = '''4.17.0'''
A : Tuple = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
A : Union[str, Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
A : Optional[Any] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
A : Dict = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
A : Any = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
A : Tuple = '''2.0.1'''
A : List[str] = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
A : Union[str, Any] = ['''default''', '''reduce-overhead''', '''max-autotune''']
A : List[Any] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A : Tuple = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
A : List[str] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
A : Any = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 227 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str=0.999 ,lowerCamelCase : int="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase : List[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_A : Tuple = []
for i in range(lowerCamelCase ):
_A : List[Any] = i / num_diffusion_timesteps
_A : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) ,lowerCamelCase ) )
return torch.tensor(lowerCamelCase ,dtype=torch.floataa )
class __lowerCamelCase ( a_ , a_ ):
"""simple docstring"""
a = [e.name for e in KarrasDiffusionSchedulers]
a = 2
@register_to_config
def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ):
if trained_betas is not None:
_A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "linear":
_A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE)
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}')
_A : Any = 1.0 - self.betas
_A : List[Any] = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None):
if schedule_timesteps is None:
_A : Dict = self.timesteps
_A : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
_A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0
else:
_A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
_A : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A ( self : Optional[Any]):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ):
_A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE)
if self.state_in_first_order:
_A : Any = self.sigmas[step_index]
else:
_A : int = self.sigmas_interpol[step_index]
_A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ):
_A : Optional[Any] = num_inference_steps
_A : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A : List[str] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE)
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
_A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
_A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE)
_A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE)
_A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE)
# interpolate sigmas
_A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
_A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
_A : List[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
# mps does not support float64
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
else:
_A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
# interpolate timesteps
_A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype)
_A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
_A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps])
_A : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]):
# get log sigma
_A : Dict = sigma.log()
# get distribution
_A : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
_A : Union[str, Any] = low_idx + 1
_A : Dict = self.log_sigmas[low_idx]
_A : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_A : Dict = (low - log_sigma) / (low - high)
_A : Union[str, Any] = w.clamp(0 , 1)
# transform interpolation to time range
_A : int = (1 - w) * low_idx + w * high_idx
_A : Any = t.view(sigma.shape)
return t
@property
def A ( self : Any):
return self.sample is None
def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ):
_A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE)
# advance index counter by 1
_A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A : Tuple = self.sigmas[step_index]
_A : Dict = self.sigmas_interpol[step_index + 1]
_A : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_A : int = self.sigmas[step_index - 1]
_A : Union[str, Any] = self.sigmas_interpol[step_index]
_A : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A : List[Any] = 0
_A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample')
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A : str = sigma_interpol - sigma_hat
# store for 2nd order step
_A : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_A : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_A : Optional[int] = sigma_next - sigma_hat
_A : str = self.sample
_A : Any = None
_A : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE)
def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE):
# mps does not support float64
_A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa)
_A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
_A : str = self.timesteps.to(original_samples.device)
_A : str = timesteps.to(original_samples.device)
_A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps]
_A : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
_A : List[Any] = sigma.unsqueeze(-1)
_A : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any]):
return self.config.num_train_timesteps
| 227 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Any = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
__magic_name__ : Union[str, Any] = (
("""layer.""", """layer_"""),
("""word_embeddings.weight""", """word_embeddings"""),
("""position_embeddings.weight""", """position_embeddings"""),
("""token_type_embeddings.weight""", """token_type_embeddings"""),
(""".""", """/"""),
("""LayerNorm/weight""", """LayerNorm/gamma"""),
("""LayerNorm/bias""", """LayerNorm/beta"""),
("""weight""", """kernel"""),
)
if not os.path.isdir(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
__magic_name__ : List[str] = model.state_dict()
def to_tf_var_name(_A ):
for patt, repl in iter(_UpperCamelCase ):
__magic_name__ : int = name.replace(_UpperCamelCase, _UpperCamelCase )
return f'bert/{name}'
def create_tf_var(_A, _A, _A ):
__magic_name__ : Any = tf.dtypes.as_dtype(tensor.dtype )
__magic_name__ : List[Any] = tf.get_variable(dtype=_UpperCamelCase, shape=tensor.shape, name=_UpperCamelCase, initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_UpperCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__magic_name__ : List[str] = to_tf_var_name(_UpperCamelCase )
__magic_name__ : str = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__magic_name__ : Optional[int] = torch_tensor.T
__magic_name__ : Dict = create_tf_var(tensor=_UpperCamelCase, name=_UpperCamelCase, session=_UpperCamelCase )
tf.keras.backend.set_value(_UpperCamelCase, _UpperCamelCase )
__magic_name__ : Dict = session.run(_UpperCamelCase )
print(f'Successfully created {tf_name}: {np.allclose(_UpperCamelCase, _UpperCamelCase )}' )
__magic_name__ : Dict = tf.train.Saver(tf.trainable_variables() )
saver.save(_UpperCamelCase, os.path.join(_UpperCamelCase, model_name.replace("""-""", """_""" ) + """.ckpt""" ) )
def UpperCamelCase ( _A=None ):
"""simple docstring"""
__magic_name__ : Any = argparse.ArgumentParser()
parser.add_argument("""--model_name""", type=_UpperCamelCase, required=_UpperCamelCase, help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""", type=_UpperCamelCase, default=_UpperCamelCase, required=_UpperCamelCase, help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""", type=_UpperCamelCase, required=_UpperCamelCase, help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""", type=_UpperCamelCase, required=_UpperCamelCase, help="""Directory in which to save tensorflow model""" )
__magic_name__ : Tuple = parser.parse_args(_UpperCamelCase )
__magic_name__ : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )
convert_pytorch_checkpoint_to_tf(model=_UpperCamelCase, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )
if __name__ == "__main__":
main()
| 342 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = True
@register_to_config
def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
__lowerCamelCase = Encoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
# pass init params to Decoder
__lowerCamelCase = Decoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , )
__lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
__lowerCamelCase = False
__lowerCamelCase = False
# only relevant if vae tiling is enabled
__lowerCamelCase = self.config.sample_size
__lowerCamelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowerCamelCase = 0.25
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , (Encoder, Decoder) ):
__lowerCamelCase = value
def lowerCamelCase ( self , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = use_tiling
def lowerCamelCase ( self ):
'''simple docstring'''
self.enable_tiling(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = True
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
__lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
__lowerCamelCase = torch.cat(__UpperCAmelCase )
else:
__lowerCamelCase = self.encoder(__UpperCAmelCase )
__lowerCamelCase = self.quant_conv(__UpperCAmelCase )
__lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
__lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
__lowerCamelCase = self.decoder(__UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
@apply_forward_hook
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
__lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
__lowerCamelCase = torch.cat(__UpperCAmelCase )
else:
__lowerCamelCase = self._decode(__UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase )
for y in range(__UpperCAmelCase ):
__lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase )
for x in range(__UpperCAmelCase ):
__lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCamelCase = []
for i in range(0 , x.shape[2] , __UpperCAmelCase ):
__lowerCamelCase = []
for j in range(0 , x.shape[3] , __UpperCAmelCase ):
__lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCamelCase = self.encoder(__UpperCAmelCase )
__lowerCamelCase = self.quant_conv(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
__lowerCamelCase = []
for i, row in enumerate(__UpperCAmelCase ):
__lowerCamelCase = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
__lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
__lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCamelCase = []
for i in range(0 , z.shape[2] , __UpperCAmelCase ):
__lowerCamelCase = []
for j in range(0 , z.shape[3] , __UpperCAmelCase ):
__lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
__lowerCamelCase = self.decoder(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
__lowerCamelCase = []
for i, row in enumerate(__UpperCAmelCase ):
__lowerCamelCase = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
__lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ):
'''simple docstring'''
__lowerCamelCase = sample
__lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist
if sample_posterior:
__lowerCamelCase = posterior.sample(generator=__UpperCAmelCase )
else:
__lowerCamelCase = posterior.mode()
__lowerCamelCase = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
| 330 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : int = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class __UpperCamelCase ( _A ):
SCREAMING_SNAKE_CASE = "swin2sr"
SCREAMING_SNAKE_CASE = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self : Dict , __SCREAMING_SNAKE_CASE : Optional[int]=6_4 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : List[Any]=1_8_0 , __SCREAMING_SNAKE_CASE : Dict=[6, 6, 6, 6, 6, 6] , __SCREAMING_SNAKE_CASE : Optional[Any]=[6, 6, 6, 6, 6, 6] , __SCREAMING_SNAKE_CASE : Tuple=8 , __SCREAMING_SNAKE_CASE : str=2.0 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Tuple=0.0_2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : int="1conv" , __SCREAMING_SNAKE_CASE : List[str]="pixelshuffle" , **__SCREAMING_SNAKE_CASE : Tuple , ):
super().__init__(**__SCREAMING_SNAKE_CASE)
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = len(__SCREAMING_SNAKE_CASE)
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = layer_norm_eps
A = initializer_range
A = upscale
A = img_range
A = resi_connection
A = upsampler
| 353 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__A : Union[str, Any] = get_logger(__name__)
class __UpperCamelCase ( enum.Enum ):
SCREAMING_SNAKE_CASE = "all_checks"
SCREAMING_SNAKE_CASE = "basic_checks"
SCREAMING_SNAKE_CASE = "no_checks"
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase__ ) - set(lowercase__ ) ) )
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase__ ) - set(lowercase__ ) ) )
A = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
A = " for " + verification_name if verification_name is not None else ""
if len(lowercase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
"""simple docstring"""
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase__ ) - set(lowercase__ ) ) )
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise UnexpectedSplits(str(set(lowercase__ ) - set(lowercase__ ) ) )
A = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase__ ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase__ ) )
logger.info("All the splits matched successfully." )
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ = True ):
"""simple docstring"""
if record_checksum:
A = shaaaa()
with open(lowercase__ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(lowercase__ )
A = m.hexdigest()
else:
A = None
return {"num_bytes": os.path.getsize(lowercase__ ), "checksum": checksum}
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 57 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> bool:
if len(UpperCamelCase ) == 0:
return False
lowerCamelCase__ : List[str] = len(UpperCamelCase ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , UpperCamelCase )
else:
return binary_search(a_list[midpoint + 1 :] , UpperCamelCase )
if __name__ == "__main__":
_A : List[str] =input('''Enter numbers separated by comma:\n''').strip()
_A : Union[str, Any] =[int(item.strip()) for item in user_input.split(''',''')]
_A : Union[str, Any] =int(input('''Enter the number to be found in the list:\n''').strip())
_A : Optional[int] ='''''' if binary_search(sequence, target) else '''not '''
print(F'{target} was {not_str}found in {sequence}')
| 41 | import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__a : List[Any] = logging.getLogger(__name__)
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=lowercase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=lowercase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__lowercase = parser.parse_args()
return args
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
def fn(lowercase ):
return tokenizer(examples['''text'''] )
return fn
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__lowercase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__lowercase = tf.train.Features(feature=lowercase )
__lowercase = tf.train.Example(features=lowercase )
__lowercase = example.SerializeToString()
records.append(lowercase )
return records
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowercase = min(len(lowercase ) , args.limit )
__lowercase = dataset.select(range(lowercase ) )
print(F"Limiting the dataset to {args.limit} entries." )
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
__lowercase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowercase = tokenize_function(lowercase )
__lowercase = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase ):
# Concatenate all texts.
__lowercase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowercase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowercase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowercase = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__lowercase = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
__lowercase = 0
__lowercase = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
__lowercase = grouped_dataset[shard : shard + args.shard_size]
__lowercase = len(dataset_snapshot['''input_ids'''] )
__lowercase = os.path.join(lowercase , F"dataset-{shard_count}-{records_containing}.tfrecord" )
__lowercase = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
__lowercase = serialized_examples[i]
out_file.write(lowercase )
print('''Wrote file {} containing {} records'''.format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(F"Total {args.split} records: {total_records}" , file=lowercase )
if __name__ == "__main__":
__a : Optional[Any] = parse_args()
main(args) | 210 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_UpperCAmelCase : List[Any] = False
try:
_UpperCAmelCase : List[str] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class lowercase :
def __init__( self , A_ = None , A_ = [] ) -> int:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = choices
UpperCamelCase = prompt
if sys.platform == "win32":
UpperCamelCase = '*'
else:
UpperCamelCase = '➔ '
def __UpperCamelCase ( self , A_ , A_ = "" ) -> int:
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , A_ )
else:
forceWrite(self.choices[index] , A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(A_ )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def __UpperCamelCase ( self , A_ , A_ = 1 ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(A_ )
move_cursor(A_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(A_ )] for number in range(10 )] )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = int(chr(self.current_selection ) )
UpperCamelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , A_ )
else:
return
else:
return
def __UpperCamelCase ( self , A_ = 0 ) -> Optional[Any]:
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
UpperCamelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(A_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
UpperCamelCase = int(builtins.input() )
except ValueError:
UpperCamelCase = default_choice
else:
UpperCamelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(A_ , '\n' )
return choice
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__lowercase : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Any = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Union[str, Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowercase : Optional[int] = False
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
UpperCamelCase = DDIMScheduler(**A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create hint
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'cpu'
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**A_ )
UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCamelCase = init_image.resize((512, 512) )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
UpperCamelCase = torch.from_numpy(np.array(A_ ) ).float() / 255.0
UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase = 'A robot, 4k photo'
UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt='' , ).to_tuple()
UpperCamelCase = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 110 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Dict = '''▁'''
_lowercase : int = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowercase : List[str] = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
_lowercase : List[str] = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
_lowercase : Tuple = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class _UpperCAmelCase ( A__ ):
a__ : Dict = VOCAB_FILES_NAMES
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Tuple = ["input_ids", "attention_mask"]
a__ : int = []
a__ : Optional[Any] = []
def __init__( self : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Union[str, Any]=None , _lowercase : Any=None , _lowercase : str="<s>" , _lowercase : int="</s>" , _lowercase : str="</s>" , _lowercase : int="<pad>" , _lowercase : Dict="<unk>" , _lowercase : List[Any]="m2m100" , _lowercase : Optional[Dict[str, Any]] = None , _lowercase : List[Any]=8 , **_lowercase : List[str] , ):
__UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase = language_codes
__UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCAmelCase = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
__UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowerCamelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowerCamelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , language_codes=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase = vocab_file
__UpperCAmelCase = load_json(__lowerCamelCase )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = spm_file
__UpperCAmelCase = load_spm(__lowerCamelCase , self.sp_model_kwargs )
__UpperCAmelCase = len(self.encoder )
__UpperCAmelCase = {
self.get_lang_token(__lowerCamelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCamelCase )
}
__UpperCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCamelCase )}
__UpperCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
__UpperCAmelCase = src_lang if src_lang is not None else '''en'''
__UpperCAmelCase = tgt_lang
__UpperCAmelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCAmelCase = num_madeup_words
@property
def a ( self : int ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def a ( self : List[str] ):
return self._src_lang
@src_lang.setter
def a ( self : int , _lowercase : str ):
__UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self : str , _lowercase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def a ( self : Optional[int] , _lowercase : Any ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowerCamelCase , self.encoder[self.unk_token] )
def a ( self : str , _lowercase : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowerCamelCase , self.unk_token )
def a ( self : List[Any] , _lowercase : Tuple ):
__UpperCAmelCase = []
__UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
__UpperCAmelCase = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
__UpperCAmelCase = [1] * len(self.prefix_tokens )
__UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCamelCase )) + ([0] * len(__lowerCamelCase )) + suffix_ones
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self : Dict ):
__UpperCAmelCase = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : List[str] , _lowercase : Dict ):
__UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__UpperCAmelCase = {}
__UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def a ( self : Any , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = Path(__lowerCamelCase )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __lowerCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowerCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
__UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (str(__lowerCamelCase ), str(__lowerCamelCase ))
def a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : str = "en" , _lowercase : Optional[List[str]] = None , _lowercase : str = "ro" , **_lowercase : Dict , ):
__UpperCAmelCase = src_lang
__UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def a ( self : int , _lowercase : int , _lowercase : Optional[str] , _lowercase : Optional[str] , **_lowercase : Any ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__UpperCAmelCase = src_lang
__UpperCAmelCase = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase = self.get_lang_id(__lowerCamelCase )
__UpperCAmelCase = tgt_lang_id
return inputs
def a ( self : int ):
self.set_src_lang_special_tokens(self.src_lang )
def a ( self : int ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self : List[str] , _lowercase : str ):
__UpperCAmelCase = self.get_lang_token(__lowerCamelCase )
__UpperCAmelCase = self.lang_token_to_id[lang_token]
__UpperCAmelCase = [self.cur_lang_id]
__UpperCAmelCase = [self.eos_token_id]
def a ( self : List[Any] , _lowercase : str ):
__UpperCAmelCase = self.get_lang_token(__lowerCamelCase )
__UpperCAmelCase = self.lang_token_to_id[lang_token]
__UpperCAmelCase = [self.cur_lang_id]
__UpperCAmelCase = [self.eos_token_id]
def a ( self : int , _lowercase : str ):
return self.lang_code_to_token[lang]
def a ( self : int , _lowercase : str ):
__UpperCAmelCase = self.get_lang_token(__lowerCamelCase )
return self.lang_token_to_id[lang_token]
def lowercase__ ( snake_case_ :List[str] , snake_case_ :str ):
__UpperCAmelCase = sentencepiece.SentencePieceProcessor(**_A )
spm.Load(str(_A ) )
return spm
def lowercase__ ( snake_case_ :List[Any] ):
with open(_A , '''r''' ) as f:
return json.load(_A )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Optional[Any] ):
with open(_A , '''w''' ) as f:
json.dump(_A , _A , indent=2 )
| 332 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : List[str] , **__lowerCamelCase : Dict ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Any , **__lowerCamelCase : List[str] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Optional[int] , **__lowerCamelCase : int ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase = 1000 ):
__lowerCAmelCase : List[str] = 2**power
__lowerCAmelCase : List[Any] = str(_UpperCamelCase )
__lowerCAmelCase : List[str] = list(_UpperCamelCase )
__lowerCAmelCase : str = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
lowerCamelCase__ = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
lowerCamelCase__ = solution(power)
print("""Sum of the digits is: """, result) | 182 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
) | 182 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = LayoutLMTokenizer
snake_case = LayoutLMTokenizerFast
snake_case = True
snake_case = True
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
_A = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCAmelCase ( self : Union[str, Any] , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : str ):
'''simple docstring'''
_A = "UNwant\u00E9d,running"
_A = "unwanted, running"
return input_text, output_text
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.tokenizer_class(self.vocab_file )
_A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
| 79 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''gpt_neox'''
def __init__( self : List[Any] , __UpperCAmelCase : List[Any]=50432 , __UpperCAmelCase : Any=6144 , __UpperCAmelCase : List[str]=44 , __UpperCAmelCase : List[Any]=64 , __UpperCAmelCase : List[str]=24576 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Tuple=0.25 , __UpperCAmelCase : Optional[Any]=10000 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Union[str, Any]=1E-5 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple , ):
'''simple docstring'''
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = rotary_pct
_A = rotary_emb_base
_A = attention_dropout
_A = hidden_dropout
_A = classifier_dropout
_A = initializer_range
_A = layer_norm_eps
_A = use_cache
_A = tie_word_embeddings
_A = use_parallel_residual
_A = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
_A = self.rope_scaling.get("type" , __UpperCAmelCase )
_A = self.rope_scaling.get("factor" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 79 | 1 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase : List[Any] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase : int = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase : str = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase : Optional[Any] = f"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase : Optional[int] = f"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase : int = f"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase : Tuple = f"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase : str = f"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase : Optional[int] = f"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase : str = f"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase : Any = f"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase : Any = f"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase : List[str] = f"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase : int = f"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase : List[str] = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase : List[str] = 'mid_block.attentions.0.'
lowerCAmelCase : Any = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase : Dict = f"""mid_block.resnets.{j}."""
lowerCAmelCase : int = f"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def A_( A : Dict):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCamelCase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCamelCase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCamelCase = v.replace(UpperCamelCase__ , UpperCamelCase__)
UpperCamelCase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCamelCase = v.replace(UpperCamelCase__ , UpperCamelCase__)
UpperCamelCase = v
UpperCamelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase : Optional[Any] = f"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase : str = f"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase : Optional[int] = f"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase : Union[str, Any] = f"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase : Tuple = f"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase : List[str] = f"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase : Tuple = f"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase : str = f"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase : Dict = f"""mid_block.resnets.{i}."""
lowerCAmelCase : Tuple = f"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def A_( A : Union[str, Any]):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1)
def A_( A : str):
UpperCamelCase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCamelCase = v.replace(UpperCamelCase__ , UpperCamelCase__)
UpperCamelCase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCamelCase = v.replace(UpperCamelCase__ , UpperCamelCase__)
UpperCamelCase = v
UpperCamelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCamelCase = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''')
UpperCamelCase = reshape_weight_for_sd(UpperCamelCase__)
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase : Tuple = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase : Optional[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase : Dict = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase : str = {'q': 0, 'k': 1, 'v': 2}
def A_( A : Optional[int]):
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight')
or k.endswith('.self_attn.k_proj.weight')
or k.endswith('.self_attn.v_proj.weight')
):
UpperCamelCase = k[: -len('.q_proj.weight')]
UpperCamelCase = k[-len('q_proj.weight')]
if k_pre not in capture_qkv_weight:
UpperCamelCase = [None, None, None]
UpperCamelCase = v
continue
if (
k.endswith('.self_attn.q_proj.bias')
or k.endswith('.self_attn.k_proj.bias')
or k.endswith('.self_attn.v_proj.bias')
):
UpperCamelCase = k[: -len('.q_proj.bias')]
UpperCamelCase = k[-len('q_proj.bias')]
if k_pre not in capture_qkv_bias:
UpperCamelCase = [None, None, None]
UpperCamelCase = v
continue
UpperCamelCase = textenc_pattern.sub(lambda A: protected[re.escape(m.group(0))] , UpperCamelCase__)
UpperCamelCase = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
UpperCamelCase = textenc_pattern.sub(lambda A: protected[re.escape(m.group(0))] , UpperCamelCase__)
UpperCamelCase = torch.cat(UpperCamelCase__)
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
UpperCamelCase = textenc_pattern.sub(lambda A: protected[re.escape(m.group(0))] , UpperCamelCase__)
UpperCamelCase = torch.cat(UpperCamelCase__)
return new_state_dict
def A_( A : Dict):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase : List[Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase : int = load_file(unet_path, device='cpu')
else:
lowerCAmelCase : List[str] = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase : str = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase : str = load_file(vae_path, device='cpu')
else:
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase : Any = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase : Optional[Any] = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase : Optional[Any] = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase : List[Any] = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase : Any = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase : Optional[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase : str = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase : Optional[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase : Union[str, Any] = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase : str = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase : List[str] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase : Optional[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase : List[Any] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase : int = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 367 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
lowerCAmelCase : List[Any] = [8, 5, 9, 7]
lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCAmelCase : Tuple = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ , A_ , A_ , )-> None:
'''simple docstring'''
UpperCamelCase = claim_vector
UpperCamelCase = allocated_resources_table
UpperCamelCase = maximum_claim_table
def UpperCAmelCase_ ( self )-> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCAmelCase_ ( self )-> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCAmelCase_ ( self )-> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(A_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCAmelCase_ ( self )-> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(A_ ): i for i in self.__need()}
def UpperCAmelCase_ ( self , **A_ )-> None:
'''simple docstring'''
UpperCamelCase = self.__need()
UpperCamelCase = self.__allocated_resources_table
UpperCamelCase = self.__available_resources()
UpperCamelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
UpperCamelCase = False
for each_need in need_list:
UpperCamelCase = True
for index, need in enumerate(A_ ):
if need > available_resources[index]:
UpperCamelCase = False
break
if execution:
UpperCamelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCamelCase = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(A_ )
# update available/freed resources stack
UpperCamelCase = np.array(A_ ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(A_ ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(A_ ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(A_ ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(A_ ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(A_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251 | 0 |
class __magic_name__ :
"""simple docstring"""
def __init__( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = 0
A_ : Optional[Any] = 0
A_ : int = {}
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Any ):
'''simple docstring'''
if vertex not in self.adjacency:
A_ : Union[str, Any] = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Any] , snake_case :Optional[Any] , snake_case :int ):
'''simple docstring'''
self.add_vertex(snake_case )
self.add_vertex(snake_case )
if head == tail:
return
A_ : Any = weight
A_ : Optional[Any] = weight
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[int] = self.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case ) ):
A_ : List[str] = list(edges[i] )
edges.sort(key=lambda snake_case : e[2] )
for i in range(len(snake_case ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A_ : List[Any] = edges[i][2] + 1
for edge in edges:
A_ , A_ , A_ : Any = edge
A_ : List[Any] = weight
A_ : Optional[Any] = weight
def __str__( self :Tuple ):
'''simple docstring'''
A_ : Any = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A_ : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n" )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :str=None , snake_case :Tuple=None ):
'''simple docstring'''
A_ : Dict = Graph()
if vertices is None:
A_ : str = []
if edges is None:
A_ : Optional[Any] = []
for vertex in vertices:
g.add_vertex(snake_case )
for edge in edges:
g.add_edge(*snake_case )
return g
class __magic_name__ :
"""simple docstring"""
def __init__( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = {}
A_ : Union[str, Any] = {}
def __len__( self :Optional[int] ):
'''simple docstring'''
return len(self.parent )
def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Any ):
'''simple docstring'''
if item in self.parent:
return self.find(snake_case )
A_ : Optional[int] = item
A_ : Dict = 0
return item
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Tuple ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(snake_case )
if item != self.parent[item]:
A_ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int , snake_case :List[str] ):
'''simple docstring'''
A_ : str = self.find(snake_case )
A_ : Optional[int] = self.find(snake_case )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A_ : Tuple = roota
return roota
if self.rank[roota] < self.rank[roota]:
A_ : List[str] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A_ : str = roota
return roota
return None
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = graph.num_vertices
A_ : int = Graph.UnionFind()
A_ : Any = []
while num_components > 1:
A_ : Union[str, Any] = {}
for vertex in graph.get_vertices():
A_ : Optional[int] = -1
A_ : Optional[Any] = graph.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
A_ , A_ , A_ : Optional[Any] = edge
A_ : Optional[int] = union_find.find(snake_case )
A_ : List[Any] = union_find.find(snake_case )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : Dict = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A_ , A_ , A_ : Optional[int] = cheap_edge[vertex]
if union_find.find(snake_case ) != union_find.find(snake_case ):
union_find.union(snake_case , snake_case )
mst_edges.append(cheap_edge[vertex] )
A_ : Optional[Any] = num_components - 1
A_ : Dict = Graph.build(edges=snake_case )
return mst
| 300 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
'''simple docstring'''
if isinstance(snake_case , snake_case ):
A_ : Dict = 1
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = len(snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case )}." )
# get prompt text embeddings
A_ : int = self.tokenizer(
snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ : int = text_embeddings.shape
A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ : List[str]
if negative_prompt is None:
A_ : List[str] = [""]
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
f" {type(snake_case )}." )
elif isinstance(snake_case , snake_case ):
A_ : Optional[Any] = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A_ : Any = negative_prompt
A_ : Optional[int] = text_input_ids.shape[-1]
A_ : Dict = self.tokenizer(
snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ : Tuple = uncond_embeddings.shape[1]
A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A_ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ : Tuple = torch.randn(
snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
self.device )
else:
A_ : int = torch.randn(
snake_case , generator=snake_case , device=self.device , dtype=snake_case )
A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A_ : Tuple = latents_reference.to(self.device )
A_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A_ : Optional[Any] = 0 if dx < 0 else dx
A_ : Optional[Any] = 0 if dy < 0 else dy
A_ : List[str] = max(-dx , 0 )
A_ : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[str] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.chunk(2 )
A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
A_ : List[str] = 1 / 0.18215 * latents
A_ : Tuple = self.vae.decode(snake_case ).sample
A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
self.device )
A_ , A_ : List[str] = self.safety_checker(
images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A_ : List[str] = None
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
| 300 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
if not is_accelerate_available():
return method
_UpperCamelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowerCamelCase ) < version.parse("0.17.0" ):
return method
def wrapper(self , *a__ , **a__ ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *_lowerCamelCase , **_lowerCamelCase )
return wrapper
| 369 | import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_)
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
_UpperCamelCase = TextIteratorStreamer(lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=lowercase_)
thread.start()
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_ , skip_prompt=lowercase_)
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("distilgpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = torch.ones((1, 5) , device=lowercase_).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_ , skip_special_tokens=lowercase_)
model.generate(lowercase_ , max_new_tokens=1 , do_sample=lowercase_ , streamer=lowercase_)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase = tokenizer(lowercase_ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = TextIteratorStreamer(lowercase_ , timeout=0.0_01)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=lowercase_)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowercase_):
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A: Tuple = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: str = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : list ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(UpperCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(UpperCamelCase ) == 1:
return True
UpperCAmelCase : Union[str, Any] = series[1] - series[0]
for index in range(len(UpperCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _snake_case ( UpperCamelCase : list ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(UpperCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
UpperCAmelCase : Any = 0
for val in series:
answer += val
return answer / len(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowerCamelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowerCamelCase : int ):
snake_case : int = r"\w+[.]\d+"
snake_case : Any = re.findall(__lowerCamelCase , __lowerCamelCase )
for pat in pats:
snake_case : Dict = key.replace(__lowerCamelCase , "_".join(pat.split("." ) ) )
return key
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ):
snake_case : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
snake_case : int = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
snake_case : int = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
snake_case : str = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case : Union[str, Any] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
snake_case : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case : Optional[Any] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
snake_case : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case : Optional[int] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case : Tuple = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : int=42 ):
# Step 1: Convert pytorch tensor to numpy
snake_case : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
snake_case : Dict = flax_model.init_weights(PRNGKey(__lowerCamelCase ) )
snake_case : List[str] = flatten_dict(__lowerCamelCase )
snake_case : List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case : Optional[int] = rename_key(__lowerCamelCase )
snake_case : Optional[Any] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
snake_case , snake_case : Optional[Any] = rename_key_and_reshape_tensor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
snake_case : Tuple = jnp.asarray(__lowerCamelCase )
return unflatten_dict(__lowerCamelCase )
| 10 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
| 10 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=64 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=1 , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
a = q_groups
a = k_groups
a = v_groups
a = post_attention_groups
a = intermediate_groups
a = output_groups
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = SqueezeBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = SqueezeBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = SqueezeBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = SqueezeBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = SqueezeBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_choices
a = SqueezeBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
((a) , (a) , (a) , (a) , (a) , (a)) = config_and_inputs
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__A = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = False
__A = True
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = SqueezeBertModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , dim=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = SqueezeBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
a = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
a = model(lowerCamelCase_ )[0]
a = torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCamelCase_ )
a = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-4 ) )
| 227 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_lowercase: Optional[int] = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_lowercase: Dict = 10
_lowercase: Optional[Any] = 256
def a( A : List[str] ) -> Optional[MinHash]:
"""simple docstring"""
if len(A ) < MIN_NUM_TOKENS:
return None
a = MinHash(num_perm=A )
for token in set(A ):
min_hash.update(token.encode() )
return min_hash
def a( A : str ) -> Set[str]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(A ) if len(t.strip() ) > 0}
class _lowercase :
"""simple docstring"""
def __init__(self , *,
lowerCamelCase_ = 0.85 , ):
"""simple docstring"""
a = duplication_jaccard_threshold
a = NUM_PERM
a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
a = defaultdict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self._index.query(lowerCamelCase_ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCamelCase_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = []
for base, duplicates in self._duplicate_clusters.items():
a = [base] + list(lowerCamelCase_ )
# reformat the cluster to be a list of dict
a = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(lowerCamelCase_ )
return duplicate_clusters
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.get_duplicate_clusters()
with open(lowerCamelCase_ , "w" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def a( A : Any ) -> List[Any]:
"""simple docstring"""
a , a = element
a = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def a( A : Type[Dataset] ) -> List[Any]:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(A , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def a( A : Type[Dataset] , A : float ) -> Dict:
"""simple docstring"""
a = DuplicationIndex(duplication_jaccard_threshold=A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(A ) ) , max_queue_size=100 ) ):
di.add(A , A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def a( A : str , A : str ) -> float:
"""simple docstring"""
a = get_tokens(A )
a = get_tokens(A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_lowercase: int = None
def a( A : str , A : Tuple ) -> int:
"""simple docstring"""
a = []
for elementa in cluster:
a = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
a = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(A , A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
a = 1
extremes.append(A )
return extremes
def a( A : str , A : List[str] , A : int ) -> Tuple:
"""simple docstring"""
global _shared_dataset
a = dataset
a = []
a = partial(_find_cluster_extremes_shared , jaccard_threshold=A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
A , A , ) , total=len(A ) , ):
extremes_list.append(A )
return extremes_list
def a( A : Type[Dataset] , A : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""simple docstring"""
a = make_duplicate_clusters(A , A )
a = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
a = {}
a = find_extremes(A , A , A )
for extremes in extremes_clusters:
for element in extremes:
a = element
a = duplicate_indices - set(extreme_dict.keys() )
a = dataset.filter(lambda A , A : idx not in remove_indices , with_indices=A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
a = element["base_index"] in extreme_dict
if element["is_extreme"]:
a = extreme_dict[element["base_index"]]["copies"]
print(f'''Original dataset size: {len(A )}''' )
print(f'''Number of duplicate clusters: {len(A )}''' )
print(f'''Files in duplicate cluster: {len(A )}''' )
print(f'''Unique files in duplicate cluster: {len(A )}''' )
print(f'''Filtered dataset size: {len(A )}''' )
return ds_filter, duplicate_clusters
| 227 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "data2vec-audio"
def __init__(self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=16 , __a=19 , __a=5 , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="sum" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> int:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(__a )
UpperCamelCase = list(__a )
UpperCamelCase = list(__a )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = conv_pos_kernel_size
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# adapter
UpperCamelCase = add_adapter
UpperCamelCase = adapter_kernel_size
UpperCamelCase = adapter_stride
UpperCamelCase = num_adapter_layers
UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(__a )
UpperCamelCase = list(__a )
UpperCamelCase = list(__a )
UpperCamelCase = xvector_output_dim
@property
def snake_case_ (self ) -> Tuple:
return math.prod(self.conv_stride )
| 244 |
"""simple docstring"""
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3 ):
"""simple docstring"""
UpperCamelCase = min(_SCREAMING_SNAKE_CASE )
UpperCamelCase = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3 ):
"""simple docstring"""
UpperCamelCase = mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
| 244 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
_lowerCamelCase : Dict =original_name.split('.' )[0]
_lowerCamelCase : Union[str, Any] =key.split('.' )
_lowerCamelCase : List[Any] =int(key_list[key_list.index(_UpperCamelCase ) - 2] )
_lowerCamelCase : Optional[Any] =int(key_list[key_list.index(_UpperCamelCase ) - 1] )
_lowerCamelCase : Optional[int] =orig_block_num - offset
_lowerCamelCase : Union[str, Any] =key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =OrderedDict()
_lowerCamelCase , _lowerCamelCase : Dict =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_lowerCamelCase : List[str] =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_lowerCamelCase : str =key[: key.find('proj' )]
_lowerCamelCase : List[Any] =key.replace(_UpperCamelCase , F'''patch_embeddings.{total_embed_found}.''' )
_lowerCamelCase : List[str] =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_lowerCamelCase : Any ='poolformer.encoder.' + key
if "mlp.fc1" in key:
_lowerCamelCase : Union[str, Any] =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_lowerCamelCase : Tuple =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_lowerCamelCase : Optional[int] =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'norm1' , 'before_norm' )
if "norm2" in key:
_lowerCamelCase : Any =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_lowerCamelCase : Optional[Any] =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_lowerCamelCase : List[str] =replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_lowerCamelCase : int =key.replace('head' , 'classifier' )
_lowerCamelCase : Dict =value
return new_state_dict
def a_ ( ):
'''simple docstring'''
_lowerCamelCase : Any ='http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase : Any =Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict =PoolFormerConfig()
# set attributes based on model_name
_lowerCamelCase : str ='huggingface/label-files'
_lowerCamelCase : List[Any] =model_name[-3:]
_lowerCamelCase : Optional[Any] =1_000
_lowerCamelCase : int ='imagenet-1k-id2label.json'
_lowerCamelCase : Optional[int] =(1, 1_000)
# set config attributes
_lowerCamelCase : Dict =json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase : List[Any] ={int(_UpperCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Any =idalabel
_lowerCamelCase : Optional[Any] ={v: k for k, v in idalabel.items()}
if size == "s12":
_lowerCamelCase : Optional[int] =[2, 2, 6, 2]
_lowerCamelCase : str =[64, 128, 320, 512]
_lowerCamelCase : List[str] =4.0
_lowerCamelCase : Optional[int] =0.9
elif size == "s24":
_lowerCamelCase : Dict =[4, 4, 12, 4]
_lowerCamelCase : List[Any] =[64, 128, 320, 512]
_lowerCamelCase : Any =4.0
_lowerCamelCase : Optional[Any] =0.9
elif size == "s36":
_lowerCamelCase : Dict =[6, 6, 18, 6]
_lowerCamelCase : Optional[Any] =[64, 128, 320, 512]
_lowerCamelCase : Tuple =4.0
_lowerCamelCase : List[str] =1e-6
_lowerCamelCase : Any =0.9
elif size == "m36":
_lowerCamelCase : List[str] =[6, 6, 18, 6]
_lowerCamelCase : Optional[int] =[96, 192, 384, 768]
_lowerCamelCase : Any =4.0
_lowerCamelCase : Tuple =1e-6
_lowerCamelCase : str =0.95
elif size == "m48":
_lowerCamelCase : str =[8, 8, 24, 8]
_lowerCamelCase : List[str] =[96, 192, 384, 768]
_lowerCamelCase : Any =4.0
_lowerCamelCase : Tuple =1e-6
_lowerCamelCase : Tuple =0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_lowerCamelCase : Tuple =PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
# Prepare image
_lowerCamelCase : str =prepare_img()
_lowerCamelCase : str =image_processor(images=_UpperCamelCase , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_lowerCamelCase : List[Any] =torch.load(_UpperCamelCase , map_location=torch.device('cpu' ) )
# rename keys
_lowerCamelCase : Any =rename_keys(_UpperCamelCase )
# create HuggingFace model and load state dict
_lowerCamelCase : List[str] =PoolFormerForImageClassification(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# Define image processor
_lowerCamelCase : List[str] =PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
_lowerCamelCase : Union[str, Any] =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_lowerCamelCase : Tuple =model(_UpperCamelCase )
_lowerCamelCase : Optional[int] =outputs.logits
# define expected logit slices for different models
if size == "s12":
_lowerCamelCase : List[Any] =torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
_lowerCamelCase : int =torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
_lowerCamelCase : str =torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
_lowerCamelCase : List[str] =torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
_lowerCamelCase : Any =torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 199 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor"""
__UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , __a=None , __a=None , **__a ):
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
__lowerCAmelCase = kwargs.pop("feature_extractor" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self , __a=None , __a=None , __a=None , **__a ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
__lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
__lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_A : Optional[int] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Optional[int] , A : Tuple ) ->Optional[int]:
super().__init__()
lowerCamelCase__ : Union[str, Any] = torchvision.models.resnetaaa(pretrained=A )
lowerCamelCase__ : Tuple = list(model.children() )[:-2]
lowerCamelCase__ : List[Any] = nn.Sequential(*A )
lowerCamelCase__ : Union[str, Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __lowerCamelCase ( self : Union[str, Any] , A : int ) ->Optional[int]:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase__ : Union[str, Any] = self.pool(self.model(A ) )
lowerCamelCase__ : Optional[Any] = torch.flatten(A , start_dim=2 )
lowerCamelCase__ : Dict = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , A : Union[str, Any] , A : Optional[Any] , A : str , A : Optional[int] , A : Dict ) ->Any:
lowerCamelCase__ : Optional[int] = [json.loads(A ) for l in open(A )]
lowerCamelCase__ : int = os.path.dirname(A )
lowerCamelCase__ : Tuple = tokenizer
lowerCamelCase__ : List[str] = labels
lowerCamelCase__ : Dict = len(A )
lowerCamelCase__ : Dict = max_seq_length
lowerCamelCase__ : Tuple = transforms
def __len__( self : Any ) ->Tuple:
return len(self.data )
def __getitem__( self : int , A : str ) ->Dict:
lowerCamelCase__ : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=A ) )
lowerCamelCase__ : Tuple = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase__ : Tuple = sentence[: self.max_seq_length]
lowerCamelCase__ : List[str] = torch.zeros(self.n_classes )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Union[str, Any] = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' )
lowerCamelCase__ : Optional[int] = self.transforms(A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __lowerCamelCase ( self : Dict ) ->Any:
lowerCamelCase__ : List[Any] = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def _a ( UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : List[str] = [len(row['''sentence'''] ) for row in batch]
lowerCamelCase__ : Union[str, Any] = len(UpperCAmelCase ), max(UpperCAmelCase )
lowerCamelCase__ : str = torch.zeros(UpperCAmelCase , UpperCAmelCase , dtype=torch.long )
lowerCamelCase__ : Optional[int] = torch.zeros(UpperCAmelCase , UpperCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(UpperCAmelCase , UpperCAmelCase ) ):
lowerCamelCase__ : Tuple = input_row['''sentence''']
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : Tuple = torch.stack([row['''image'''] for row in batch] )
lowerCamelCase__ : Dict = torch.stack([row['''label'''] for row in batch] )
lowerCamelCase__ : int = torch.stack([row['''image_start_token'''] for row in batch] )
lowerCamelCase__ : str = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _a ( ) -> int:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 353 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any = logging.get_logger(__name__)
_A : int = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Dict = "timesformer"
def __init__( self : List[str] , A : int=2_2_4 , A : Optional[Any]=1_6 , A : str=3 , A : str=8 , A : Any=7_6_8 , A : Dict=1_2 , A : Optional[Any]=1_2 , A : Any=3_0_7_2 , A : str="gelu" , A : Optional[int]=0.0 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : int=1e-6 , A : Tuple=True , A : Any="divided_space_time" , A : Optional[Any]=0 , **A : Tuple , ) ->str:
super().__init__(**A )
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : int = patch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Optional[int] = num_frames
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Dict = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : str = layer_norm_eps
lowerCamelCase__ : Union[str, Any] = qkv_bias
lowerCamelCase__ : str = attention_type
lowerCamelCase__ : List[str] = drop_path_rate
| 265 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = torch.load(lowercase , map_location='cpu' )
lowerCamelCase_ = chkpt['model']
# We have the base model one level deeper than the original XLM repository
lowerCamelCase_ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCamelCase_ = v
else:
lowerCamelCase_ = v
lowerCamelCase_ = chkpt['params']
lowerCamelCase_ = {n: v for n, v in config.items() if not isinstance(lowercase , (torch.FloatTensor, numpy.ndarray) )}
lowerCamelCase_ = chkpt['dico_word2id']
lowerCamelCase_ = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
lowerCamelCase_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCamelCase_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(lowercase , lowercase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowercase , indent=2 ) + '\n' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowercase , indent=2 ) + '\n' )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 204 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[str] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def _SCREAMING_SNAKE_CASE ( lowercase : np.ndarray , lowercase : Optional[str] , lowercase : Optional[str] ):
'''simple docstring'''
lowerCamelCase_ = to_pil_image(lowercase )
lowerCamelCase_ , lowerCamelCase_ = pil_image.size
lowerCamelCase_ = pytesseract.image_to_data(lowercase , lang=lowercase , output_type='dict' , config=lowercase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowerCamelCase_ = [idx for idx, word in enumerate(lowercase ) if not word.strip()]
lowerCamelCase_ = [word for idx, word in enumerate(lowercase ) if idx not in irrelevant_indices]
lowerCamelCase_ = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
lowerCamelCase_ = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
lowerCamelCase_ = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
lowerCamelCase_ = [coord for idx, coord in enumerate(lowercase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowerCamelCase_ = []
for x, y, w, h in zip(lowercase , lowercase , lowercase , lowercase ):
lowerCamelCase_ = [x, y, x + w, y + h]
actual_boxes.append(lowercase )
# finally, normalize the bounding boxes
lowerCamelCase_ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase , lowercase , lowercase ) )
assert len(lowercase ) == len(lowercase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : int , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : float = 1 / 255 , A_ : bool = True , A_ : Union[float, Iterable[float]] = None , A_ : Union[float, Iterable[float]] = None , A_ : bool = True , A_ : Optional[str] = None , A_ : Optional[str] = "" , **A_ : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_value
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCamelCase_ = apply_ocr
lowerCamelCase_ = ocr_lang
lowerCamelCase_ = tesseract_config
def a__ ( self : str , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCamelCase_ = (size['height'], size['width'])
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : Union[str, Any] , A_ : np.ndarray , A_ : Union[float, Iterable[float]] , A_ : Union[float, Iterable[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : List[Any] , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : Dict=None , A_ : bool = None , A_ : float = None , A_ : bool = None , A_ : Union[float, Iterable[float]] = None , A_ : Union[float, Iterable[float]] = None , A_ : bool = None , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Any , ) -> PIL.Image.Image:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCamelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCamelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowerCamelCase_ = []
lowerCamelCase_ = []
for image in images:
lowerCamelCase_ , lowerCamelCase_ = apply_tesseract(A_ , A_ , A_ )
words_batch.append(A_ )
boxes_batch.append(A_ )
if do_resize:
lowerCamelCase_ = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = BatchFeature(data={'pixel_values': images} , tensor_type=A_ )
if apply_ocr:
lowerCamelCase_ = words_batch
lowerCamelCase_ = boxes_batch
return data
| 204 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[str]= tempfile.mkdtemp()
lowercase__ : Union[str, Any]= BlipImageProcessor()
lowercase__ : Any= GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowercase__ : Any= BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
lowercase__ : str= InstructBlipProcessor(snake_case__ , snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).qformer_tokenizer
def UpperCAmelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : Union[str, Any]= [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Optional[Any]= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Optional[Any]= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowercase__ : Optional[Any]= InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
self.assertIsInstance(processor.qformer_tokenizer , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Any= self.get_qformer_tokenizer()
lowercase__ : Any= InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
lowercase__ : Tuple= self.prepare_image_inputs()
lowercase__ : int= image_processor(snake_case__ , return_tensors="np" )
lowercase__ : List[str]= processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= self.get_image_processor()
lowercase__ : List[str]= self.get_tokenizer()
lowercase__ : Optional[Any]= self.get_qformer_tokenizer()
lowercase__ : Optional[Any]= InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
lowercase__ : Optional[Any]= "lower newer"
lowercase__ : Any= processor(text=snake_case__ )
lowercase__ : Union[str, Any]= tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
lowercase__ : List[Any]= qformer_tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Any= self.get_qformer_tokenizer()
lowercase__ : Optional[int]= InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
lowercase__ : List[str]= "lower newer"
lowercase__ : Optional[int]= self.prepare_image_inputs()
lowercase__ : Dict= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.get_image_processor()
lowercase__ : int= self.get_tokenizer()
lowercase__ : Optional[int]= self.get_qformer_tokenizer()
lowercase__ : Any= InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
lowercase__ : Dict= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Optional[Any]= processor.batch_decode(snake_case__ )
lowercase__ : List[Any]= tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.get_image_processor()
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : Optional[Any]= self.get_qformer_tokenizer()
lowercase__ : int= InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
lowercase__ : Union[str, Any]= "lower newer"
lowercase__ : int= self.prepare_image_inputs()
lowercase__ : Dict= processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 355 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
a : Optional[Any] = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
a : str = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def lowercase__() ->List[Any]:
"""simple docstring"""
lowercase__ : str= calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(A , A )
lowercase__ : Optional[int]= calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def lowercase__() ->int:
"""simple docstring"""
lowercase__ : Optional[int]= "rougeLsum"
lowercase__ : str= calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k]
lowercase__ : Union[str, Any]= calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowercase__() ->Tuple:
"""simple docstring"""
lowercase__ : Tuple= ["rouge1", "rouge2", "rougeL"]
lowercase__ : Optional[Any]= calculate_rouge(A , A , newline_sep=A , rouge_keys=A )
lowercase__ : Dict= calculate_rouge(A , A , newline_sep=A , rouge_keys=A )
assert score_sep == score_no_sep
def lowercase__() ->Optional[int]:
"""simple docstring"""
lowercase__ : int= [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
lowercase__ : Dict= [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(A , A , newline_sep=A ) == calculate_rouge(A , A , newline_sep=A )
def lowercase__() ->Dict:
"""simple docstring"""
lowercase__ : List[str]= [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
lowercase__ : Union[str, Any]= [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
lowercase__ : List[Any]= calculate_rouge(A , A , rouge_keys=["rougeLsum"] , newline_sep=A )["rougeLsum"]
lowercase__ : Optional[Any]= calculate_rouge(A , A , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def lowercase__() ->Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any]= Path("examples/seq2seq/test_data/wmt_en_ro" )
lowercase__ : Union[str, Any]= calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(A , A )
lowercase__ : List[Any]= calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=A )
assert isinstance(A , A )
| 150 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = '''time_series_transformer'''
_snake_case : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "student_t" , _UpperCamelCase = "nll" , _UpperCamelCase = 1 , _UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase = "mean" , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = True , _UpperCamelCase = "gelu" , _UpperCamelCase = 6_4 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 0.02 , _UpperCamelCase=True , **_UpperCamelCase , ) -> Optional[int]:
# time series specific configuration
UpperCAmelCase_ : Any = prediction_length
UpperCAmelCase_ : int = context_length or prediction_length
UpperCAmelCase_ : Tuple = distribution_output
UpperCAmelCase_ : Optional[int] = loss
UpperCAmelCase_ : Optional[Any] = input_size
UpperCAmelCase_ : List[Any] = num_time_features
UpperCAmelCase_ : str = lags_sequence
UpperCAmelCase_ : Union[str, Any] = scaling
UpperCAmelCase_ : List[Any] = num_dynamic_real_features
UpperCAmelCase_ : Tuple = num_static_real_features
UpperCAmelCase_ : Tuple = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : str = cardinality
else:
UpperCAmelCase_ : List[str] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : Any = embedding_dimension
else:
UpperCAmelCase_ : str = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ : int = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ : str = input_size * len(_UpperCamelCase ) + self._number_of_features
UpperCAmelCase_ : Optional[Any] = d_model
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : List[Any] = decoder_attention_heads
UpperCAmelCase_ : int = encoder_ffn_dim
UpperCAmelCase_ : Any = decoder_ffn_dim
UpperCAmelCase_ : Dict = encoder_layers
UpperCAmelCase_ : Tuple = decoder_layers
UpperCAmelCase_ : Union[str, Any] = dropout
UpperCAmelCase_ : Optional[Any] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : List[str] = encoder_layerdrop
UpperCAmelCase_ : List[str] = decoder_layerdrop
UpperCAmelCase_ : int = activation_function
UpperCAmelCase_ : List[Any] = init_std
UpperCAmelCase_ : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 29 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , _UpperCAmelCase ).groups()[0]
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Union[str, Any], lowerCAmelCase : Tuple, lowerCAmelCase : Tuple=None, lowerCAmelCase : List[Any]=None ) -> Optional[Any]:
lowercase : str = file_names
lowercase : Optional[Any] = image_transform
lowercase : int = label_to_id
def __len__( self : List[Any] ) -> Any:
return len(self.file_names )
def __getitem__( self : str, lowerCAmelCase : Optional[int] ) -> Optional[Any]:
lowercase : List[Any] = self.file_names[idx]
lowercase : Tuple = PIL.Image.open(lowerCAmelCase )
lowercase : Tuple = raw_image.convert('RGB' )
if self.image_transform is not None:
lowercase : Optional[Any] = self.image_transform(lowerCAmelCase )
lowercase : Any = extract_label(lowerCAmelCase )
if self.label_to_id is not None:
lowercase : List[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if args.with_tracking:
lowercase : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowercase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Union[str, Any] = config['lr']
lowercase : Any = int(config['num_epochs'] )
lowercase : Union[str, Any] = int(config['seed'] )
lowercase : List[Any] = int(config['batch_size'] )
lowercase : str = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
lowercase : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowercase : Dict = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowercase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowercase : Tuple = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowercase : Optional[int] = os.path.split(_UpperCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
lowercase : Optional[Any] = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowercase : str = [extract_label(_UpperCAmelCase ) for fname in file_names]
lowercase : List[Any] = list(set(_UpperCAmelCase ) )
id_to_label.sort()
lowercase : Optional[Any] = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
lowercase : List[Any] = np.random.permutation(len(_UpperCAmelCase ) )
lowercase : Optional[Any] = int(0.8 * len(_UpperCAmelCase ) )
lowercase : int = random_perm[:cut]
lowercase : Any = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowercase : Dict = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowercase : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
lowercase : List[Any] = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
lowercase : List[str] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
lowercase : Dict = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
lowercase : Any = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : List[Any] = create_model('resnet50d' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Union[str, Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowercase : Dict = False
for param in model.get_classifier().parameters():
lowercase : Dict = True
# We normalize the batches of images to be a bit faster.
lowercase : int = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowercase : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowercase : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowercase : List[Any] = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase : Tuple = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowercase : Any = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowercase : List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowercase : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowercase : Any = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
lowercase : List[Any] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowercase : List[Any] = None
else:
lowercase : Optional[Any] = int(training_difference.replace('step_' , '' ) )
lowercase : int = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
lowercase : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowercase : Any = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowercase : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : List[str] = (batch['image'] - mean) / std
lowercase : Union[str, Any] = model(_UpperCAmelCase )
lowercase : Optional[int] = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Union[str, Any] = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowercase : Optional[Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
lowercase : int = 0
lowercase : List[Any] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : Optional[Any] = (batch['image'] - mean) / std
with torch.no_grad():
lowercase : int = model(_UpperCAmelCase )
lowercase : Tuple = outputs.argmax(dim=-1 )
lowercase , lowercase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowercase : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowercase : List[str] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_UpperCAmelCase ),
'epoch': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
lowercase : str = f'''epoch_{epoch}'''
if args.output_dir is not None:
lowercase : Any = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def lowercase__ ( ) -> Tuple:
'''simple docstring'''
lowercase : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_UpperCAmelCase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_UpperCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_UpperCAmelCase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
lowercase : int = parser.parse_args()
lowercase : List[Any] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 255 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Tuple = 'cvt'
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 192, 384] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , **__UpperCAmelCase , ) -> Dict:
super().__init__(**__UpperCAmelCase )
_a = num_channels
_a = patch_sizes
_a = patch_stride
_a = patch_padding
_a = embed_dim
_a = num_heads
_a = depth
_a = mlp_ratio
_a = attention_drop_rate
_a = drop_rate
_a = drop_path_rate
_a = qkv_bias
_a = cls_token
_a = qkv_projection_method
_a = kernel_qkv
_a = padding_kv
_a = stride_kv
_a = padding_q
_a = stride_q
_a = initializer_range
_a = layer_norm_eps | 320 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 320 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ):
"""simple docstring"""
lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = [torch.ones((1, 3, 5, 5) )]
lowercase = [[1764, 2646]]
lowercase = [[683, 1024]]
lowercase = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowercase = processor.post_process_masks(
__lowerCAmelCase , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowercase = [np.ones((1, 3, 5, 5) )]
lowercase = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowercase = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCAmelCase ):
lowercase = processor.post_process_masks(__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) )
@require_vision
@require_tf
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ):
"""simple docstring"""
lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = [tf.ones((1, 3, 5, 5) )]
lowercase = [[1764, 2646]]
lowercase = [[683, 1024]]
lowercase = processor.post_process_masks(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowercase = processor.post_process_masks(
__lowerCAmelCase , tf.convert_to_tensor(__lowerCAmelCase ) , tf.convert_to_tensor(__lowerCAmelCase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowercase = [np.ones((1, 3, 5, 5) )]
lowercase = processor.post_process_masks(
__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowercase = processor.post_process_masks(
__lowerCAmelCase , np.array(__lowerCAmelCase ) , np.array(__lowerCAmelCase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = SamImageProcessor()
lowercase = SamProcessor(__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowercase = [tf.convert_to_tensor(__lowerCAmelCase )]
lowercase = [torch.tensor(__lowerCAmelCase )]
lowercase = [[1764, 2646]]
lowercase = [[683, 1024]]
lowercase = processor.post_process_masks(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors="""tf""" )
lowercase = processor.post_process_masks(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = SamProcessor(image_processor=__lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowercase = processor(images=__lowerCAmelCase , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
lowercase = processor(images=__lowerCAmelCase , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase ) )
| 32 | """simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.