from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
    BaseOutput,
    is_accelerate_available,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for the Shap-E image-to-image pipeline: `images` holds the rendered frames.
    """

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating a latent 3D representation from an input image and rendering it with NeRF.
    """

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `enable_sequential_cpu_offload` the execution device can only be inferred from Accelerate's module hooks.
        """
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Examples:
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)

# =============================================================================

def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same backwards in base 10 (negative numbers never do).

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-101)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# =============================================================================

import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative product of
    (1 - beta) over time.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
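
# A small sanity check for the helper above (my own illustration, not part of
# the original module): the cosine transform yields betas that stay strictly
# positive and are clipped at max_beta.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,)
    assert bool((_betas > 0).all()) and bool((_betas <= 0.999).all())
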
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler created by @crowsonkb in k_diffusion, inspired by DPM-Solver-2 and Algorithm 2 from the Karras et al.
    (2022) paper.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace',"
                " 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
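
# A hedged usage sketch (mine, not from the original file) showing the intended
# call order for this scheduler API: set_timesteps -> scale_model_input ->
# model -> step. `model` stands in for any denoiser returning an epsilon
# prediction with the same shape as its input.
def _demo_denoising_loop(scheduler, model, sample, num_inference_steps=25):
    scheduler.set_timesteps(num_inference_steps, device=sample.device)
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)  # epsilon prediction
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample

# =============================================================================
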
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
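
# Illustrative invocation (script filename and runner names are hypothetical):
#
#   python check_offline_runners.py \
#       --target_runners aws-runner-1,aws-runner-2 \
#       --token "$GITHUB_TOKEN"
#
# Offline runners are written to offline_runners.txt and the raised ValueError
# makes the process exit non-zero, so a CI job using this script fails loudly.

# =============================================================================
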
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    """
    Constructs an image processor with a resize / crop_pct center-crop / rescale / normalize preprocessing chain.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
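
# A minimal usage sketch (mine, not from the original file): preprocess one
# dummy image and inspect the output shape. Assumes torch is installed so that
# return_tensors="pt" works; the defaults resize to shortest_edge/crop_pct and
# center-crop to 224x224.
if __name__ == "__main__":
    dummy_image = np.zeros((256, 256, 3), dtype=np.uint8)  # HWC uint8 image
    processor = PoolFormerImageProcessor()
    pixel_values = processor(images=dummy_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])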

# =============================================================================

"""simple docstring"""
from __future__ import annotations
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, ) ->tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# =============================================================================

import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

# =============================================================================

import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],  # noqa: E501
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,  # noqa: E501
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            "attention_mask": [[1] * 83, [1] * 29 + [0] * 54, [1] * 11 + [0] * 72],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )

# =============================================================================

import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]  # noqa: E501
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). The tokenization method is
    `<tokens> <eos> <language code>` for source-language documents, and `<language code> <tokens> <eos>` for
    target-language documents.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting: no prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting: no prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
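
# A hedged usage sketch (mine, not from the original file): translation-style
# encoding with explicit source/target languages. It downloads the pretrained
# tokenizer from the Hub, so it is illustrative only.
if __name__ == "__main__":
    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
    labels = tok(text_target="Seful ONU declara ca nu exista o solutie militara in Siria", return_tensors="pt")
    print(batch.input_ids.shape, labels.input_ids.shape)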

# =============================================================================

import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure` and under `TYPE_CHECKING`.
    Returns the two resulting dictionaries, or `None` for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
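# Illustration (ours, not part of the check script above): what `analyze_results`
# reports when the two halves of an init drift apart. The two dictionaries are
# hypothetical `parse_init` outputs, keyed by backend with "none" for base imports.
def _demo_analyze_results():
    import_side = {"none": ["AutoConfig", "AutoModel"], "torch": ["BertModel"]}
    type_hint_side = {"none": ["AutoConfig"], "torch": ["BertModel", "BertModel"]}
    for error in analyze_results(import_side, type_hint_side):
        print(error)
    # Prints:
    #   Differences for base imports:
    #     AutoModel in _import_structure but not in TYPE_HINT.
    #   Duplicate TYPE_CHECKING objects for: ['BertModel']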
'''
Project Euler Problem 173 (https://projecteuler.net/problem=173):
count the hollow square laminae that can be formed using up to `limit` tiles.
'''
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
'''
Bitonic sort: a comparison-network sort for sequences whose length is a power
of two. direction = 1 sorts ascending, direction = 0 sorts descending.
'''
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap the two elements if they are out of order for the given direction.
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    # Recursively merge a bitonic sequence of `length` elements starting at `low`.
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    # Sort by building an ascending and a descending half, then merging them.
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
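# Example run (ours): bitonic sort only guarantees a fully sorted result when
# the input length is a power of two, so pad or trim inputs accordingly.
data = [10, 30, 11, 20, 4, 33, 2, 1]  # length 8 = 2**3
bitonic_sort(data, 0, len(data), 1)
assert data == [1, 2, 4, 10, 11, 20, 30, 33]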
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
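# Sketch (ours) of the mask convention both tests above rely on: in this
# version of the pipeline the mask is a float array where 1.0 keeps a pixel
# and 0.0 marks it for repainting, so "a hat" repaints a band near the top.
import numpy as np

demo_mask = np.ones((768, 768), dtype=np.float32)  # keep everything...
demo_mask[:250, 250:-250] = 0                      # ...except a top-center region to repaint
assert demo_mask.sum() < demo_mask.size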
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
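# Quick illustration (ours) of the two launch modes selected in
# create_estimator above: SageMaker's own data-parallel backend versus plain
# PyTorch DDP, keyed on the training script name.
def pick_distribution(script: str):
    if script == "run_ddp.py":
        return None  # native PyTorch DDP, configured inside the script
    return {"smdistributed": {"dataparallel": {"enabled": True}}}

assert pick_distribution("run_glue.py") == {"smdistributed": {"dataparallel": {"enabled": True}}}
assert pick_distribution("run_ddp.py") is None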
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    # Build train/validation DataLoaders for GLUE MRPC with a bert-base-cased tokenizer.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
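# Illustration (ours, not from the example above): a toy version of what
# `find_executable_batch_size` does under the hood — retry the wrapped
# function, halving the batch size each time it raises an out-of-memory
# error. Names are hypothetical; the real accelerate decorator additionally
# recognizes CUDA/XLA-specific error strings and clears device caches.
def toy_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper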
'''
Solve Coulomb's law for whichever of force, charge1, charge2, or distance is
unknown (passed as 0); exactly one argument must be 0.
'''
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
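# Worked example (ours, not part of the original module): two 1 C charges held
# 1 m apart feel the Coulomb force F = k * |q1*q2| / d**2 = 8.988e9 N.
result = coulombs_law(force=0, charge1=1, charge2=1, distance=1)
assert abs(result["force"] - 8.988e9) < 1e-3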
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str=13 , __magic_name__ : Tuple=10 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[Any]=2 , __magic_name__ : str=2 , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]=32 , __magic_name__ : str=5 , __magic_name__ : Tuple=4 , __magic_name__ : Tuple=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Tuple=10 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : Optional[Any]=0.9 , __magic_name__ : str=None , ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = image_size
__snake_case : Union[str, Any] = num_channels
__snake_case : int = patch_size
__snake_case : Dict = tubelet_size
__snake_case : Union[str, Any] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : Dict = use_labels
__snake_case : Tuple = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : List[str] = type_sequence_label_size
__snake_case : List[str] = initializer_range
__snake_case : Optional[Any] = mask_ratio
__snake_case : int = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__snake_case : Tuple = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__snake_case : Optional[Any] = int(mask_ratio * self.seq_length )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Tuple ) -> int:
"""simple docstring"""
__snake_case : Tuple = VideoMAEModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Any ) -> int:
"""simple docstring"""
__snake_case : Dict = VideoMAEForPreTraining(__magic_name__ )
model.to(__magic_name__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__snake_case : List[str] = torch.ones((self.num_masks,) )
__snake_case : Tuple = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__snake_case : Any = mask.expand(self.batch_size , -1 ).bool()
__snake_case : Union[str, Any] = model(__magic_name__ , __magic_name__ )
# model only returns predictions for masked patches
__snake_case : List[Any] = mask.sum().item()
__snake_case : Any = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
__snake_case : List[str] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Dict = config_and_inputs
__snake_case : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = VideoMAEModelTester(self )
__snake_case : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str=False ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__snake_case : str = torch.ones((self.model_tester.num_masks,) )
__snake_case : str = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__snake_case : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
__snake_case : str = bool_masked_pos.to(__magic_name__ )
if return_labels:
if model_class in [
*get_values(__magic_name__ ),
]:
__snake_case : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
@slow
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = VideoMAEModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
__snake_case : Union[str, Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__snake_case : str = True
__snake_case : int = False
__snake_case : List[str] = True
__snake_case : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Optional[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Optional[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__snake_case : Tuple = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Dict = True
__snake_case : Optional[int] = True
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[str] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ):
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.hidden_states
__snake_case : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : Any = self.model_tester.seq_length - self.model_tester.num_masks
__snake_case : List[str] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
__magic_name__ )
__snake_case : Optional[Any] = self.default_image_processor
__snake_case : List[Any] = prepare_video()
__snake_case : Union[str, Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# verify the logits
__snake_case : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : str = torch.tensor([0.3669, -0.0688, -0.2421] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__magic_name__ )
__snake_case : List[Any] = self.default_image_processor
__snake_case : List[str] = prepare_video()
__snake_case : Union[str, Any] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# add boolean mask, indicating which patches to mask
__snake_case : str = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__snake_case : Tuple = torch.load(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : str = model(**__magic_name__ )
# verify the logits
__snake_case : Tuple = torch.Size([1, 14_08, 15_36] )
__snake_case : Tuple = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=__magic_name__ )
self.assertEqual(outputs.logits.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__snake_case : int = torch.tensor([0.5142] , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.loss , __magic_name__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__snake_case : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__magic_name__ ).to(
__magic_name__ )
with torch.no_grad():
__snake_case : Optional[Any] = model(**__magic_name__ )
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
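# Minimal usage sketch (ours, not from the file above). We build a blank RGB
# image so the snippet is self-contained and pass our own words/boxes, which
# requires apply_ocr=False; "microsoft/layoutlmv3-base" is the public
# checkpoint this processor ships with, and boxes use 0-1000 normalized
# coordinates.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3TokenizerFast

image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # we supply words/boxes ourselves
tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)

image = Image.new("RGB", (224, 224), color="white")
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
encoding = processor(images=image, text=words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']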
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def a__ ( self , a_=0 , **a_ ) -> int:
lowercase : Dict = dict(self.forward_default_kwargs )
lowercase : Dict = kwargs.pop("num_inference_steps" , a_ )
lowercase : List[Any] = self.dummy_sample
lowercase : Union[str, Any] = 0.1 * sample
lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase : Optional[int] = self.get_scheduler_config(**a_ )
lowercase : Optional[int] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
lowercase : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
lowercase : str = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
lowercase : Tuple = dummy_past_residuals[:]
lowercase : Any = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
lowercase : List[Any] = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase : Tuple = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
lowercase : int = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ) -> Dict:
pass
def a__ ( self , a_=0 , **a_ ) -> Optional[int]:
lowercase : str = dict(self.forward_default_kwargs )
lowercase : Any = kwargs.pop("num_inference_steps" , a_ )
lowercase : Optional[int] = self.dummy_sample
lowercase : Optional[int] = 0.1 * sample
lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase : Union[str, Any] = self.get_scheduler_config()
lowercase : List[str] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
lowercase : List[str] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
lowercase : Tuple = dummy_past_residuals[:]
lowercase : Optional[int] = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
lowercase : Tuple = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase : List[Any] = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
lowercase : int = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
def a__ ( self ) -> Tuple:
lowercase : Tuple = dict(self.forward_default_kwargs )
lowercase : Optional[int] = kwargs.pop("num_inference_steps" , a_ )
for scheduler_class in self.scheduler_classes:
lowercase : Optional[int] = self.get_scheduler_config()
lowercase : Tuple = scheduler_class(**a_ )
lowercase : int = self.dummy_sample
lowercase : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ):
lowercase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase : List[str] = dummy_past_residuals[:]
lowercase : Optional[Any] = scheduler.step_prk(a_ , 0 , a_ , **a_ ).prev_sample
lowercase : Tuple = scheduler.step_prk(a_ , 1 , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase : List[str] = scheduler.step_plms(a_ , 0 , a_ , **a_ ).prev_sample
lowercase : Optional[Any] = scheduler.step_plms(a_ , 1 , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a__ ( self ) -> List[Any]:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a_ )
def a__ ( self ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a_ )
lowercase : int = self.scheduler_classes[0]
lowercase : Dict = self.get_scheduler_config(steps_offset=1 )
lowercase : List[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def a__ ( self ) -> Any:
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def a__ ( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def a__ ( self ) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def a__ ( self ) -> List[str]:
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=a_ )
def a__ ( self ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=a_ )
def a__ ( self ) -> Tuple:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowercase : Optional[Any] = 2_7
for scheduler_class in self.scheduler_classes:
lowercase : int = self.dummy_sample
lowercase : str = 0.1 * sample
lowercase : Optional[int] = self.get_scheduler_config()
lowercase : Optional[int] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowercase : Tuple = scheduler.step_prk(a_ , a_ , a_ ).prev_sample
def a__ ( self ) -> List[str]:
with self.assertRaises(a_ ):
lowercase : int = self.scheduler_classes[0]
lowercase : List[Any] = self.get_scheduler_config()
lowercase : List[str] = scheduler_class(**a_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def a__ ( self ) -> Union[str, Any]:
lowercase : Any = self.full_loop()
lowercase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
lowercase : List[str] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def a__ ( self ) -> List[Any]:
lowercase : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
lowercase : Dict = torch.sum(torch.abs(a_ ) )
lowercase : str = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def a__ ( self ) -> Optional[int]:
# We specify different beta, so that the first alpha is 0.99
lowercase : Union[str, Any] = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowercase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
lowercase : int = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
# We specify different beta, so that the first alpha is 0.99
lowercase : int = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowercase : str = torch.sum(torch.abs(a_ ) )
lowercase : List[str] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
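# Minimal sketch (ours) of the scheduler API exercised by full_loop above:
# PNDM warms up with Runge-Kutta (prk) steps, then continues with linear
# multistep (plms) steps. The residual below is a stand-in for a real
# diffusion UNet's output.
import torch
from diffusers import PNDMScheduler

demo_scheduler = PNDMScheduler(num_train_timesteps=1000)
demo_scheduler.set_timesteps(10)
demo_sample = torch.randn(1, 3, 8, 8)
for t in demo_scheduler.prk_timesteps:
    residual = 0.1 * demo_sample  # stand-in for model(sample, t)
    demo_sample = demo_scheduler.step_prk(residual, t, demo_sample).prev_sample
for t in demo_scheduler.plms_timesteps:
    residual = 0.1 * demo_sample
    demo_sample = demo_scheduler.step_plms(residual, t, demo_sample).prev_sample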
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : str ) -> bool:
lowerCamelCase_ = 0
for ch in input_str:
lowerCamelCase_ = ord(_lowerCamelCase )
lowerCamelCase_ = pow(2 , _lowerCamelCase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
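

# Illustrative trace of the bitmap for the input "aba" (assuming ASCII code points,
# ord("a") == 97, ord("b") == 98):
#
#   ch    bit tested                 bitmap after
#   "a"   (bitmap >> 97) & 1 == 0    1 << 97
#   "b"   (bitmap >> 98) & 1 == 0    (1 << 97) | (1 << 98)
#   "a"   (bitmap >> 97) & 1 == 1    -> returns False
#
# The bitmap is an arbitrary-precision Python int, so this works for any Unicode code
# point, at the cost of very large integers for high code points.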
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = KandinskyImgaImgPipeline
SCREAMING_SNAKE_CASE : Tuple = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
SCREAMING_SNAKE_CASE : int = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
SCREAMING_SNAKE_CASE : List[str] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE : Any = False
@property
def UpperCamelCase ( self : List[str] ) -> str:
return 32
@property
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return 32
@property
def UpperCamelCase ( self : Optional[int] ) -> str:
return self.time_input_dim
@property
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : int ) -> List[Any]:
return 100
@property
def UpperCamelCase ( self : Any ) -> List[Any]:
lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def UpperCamelCase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCamelCase_ = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self : int ) -> List[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase_ = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = self.dummy_tokenizer
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase_ = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict=0 ) -> str:
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((256, 256) )
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self : Tuple ) -> Any:
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase ( self : Tuple ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Any ) -> int:
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase_ = 'A red cartoon frog, 4k'
lowerCamelCase_ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
lowerCamelCase_ = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase_ , lowerCamelCase_ = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCamelCase_ = pipeline(
__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
"""Convert GLPN checkpoints."""

import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
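

# Illustrative examples of the renaming above (derived from the rules, not exhaustive):
#   "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"
#   "module.encoder.block1.0.attn.q.weight"   -> "glpn.encoder.block.0.0.attention.self.query.weight"
#   "module.last_layer_depth.weight"          -> "head.head.weight"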
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
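

# Shapes involved in the split above (illustrative): with hidden size H for stage i, the
# original checkpoint stores a fused key/value projection of shape (2*H, H). Rows [:H]
# become the key projection and rows [H:] the value projection, e.g. for H == 64:
#     kv.weight: (128, 64) -> key.weight: (64, 64), value.weight: (64, 64)
#     kv.bias:   (128,)    -> key.bias:   (64,),    value.bias:   (64,)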
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
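

# Example contents of the `state_{epoch}.json` file written above (values illustrative):
#     {"accuracy": 0.75, "lr": 1.5e-05, "optimizer_lr": 1.5e-05, "epoch": 0, "step": 115}
# The resume branch near the top of `training_function` reads these fields back and
# asserts they match the state of the freshly loaded checkpoint.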
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
"""Feature extractor class for DPT."""

import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """
    Apply a function to iterable elements in parallel, using either a multiprocessing
    pool or the joblib backend configured via `parallel_backend`.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """
    Configures a joblib backend to be used by `parallel_map` inside the context.
    """
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
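

# Minimal usage sketch (illustrative; the "spark" backend additionally requires the
# `joblibspark` package to be installed):
#
#     from datasets.parallel import parallel_backend
#
#     with parallel_backend("spark"):
#         ...  # calls that go through `parallel_map` now run via joblib instead of a Pool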
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCamelCase__ = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ = 'ResNetConfig'
# Base docstring
UpperCamelCase__ = 'microsoft/resnet-50'
UpperCamelCase__ = [1, 20_48, 7, 7]
# Image classification docstring
UpperCamelCase__ = 'microsoft/resnet-50'
UpperCamelCase__ = 'tiger cat'
UpperCamelCase__ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 3 , UpperCamelCase_ = 1 , UpperCamelCase_ = "relu" ):
super().__init__()
UpperCAmelCase__ : Union[str, Any] = nn.Convad(
UpperCamelCase_ , UpperCamelCase_ , kernel_size=UpperCamelCase_ , stride=UpperCamelCase_ , padding=kernel_size // 2 , bias=UpperCamelCase_ )
UpperCAmelCase__ : Any = nn.BatchNormad(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = self.convolution(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = self.normalization(UpperCamelCase_ )
UpperCAmelCase__ : Dict = self.activation(UpperCamelCase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : int = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
UpperCAmelCase__ : Optional[int] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
UpperCAmelCase__ : Union[str, Any] = config.num_channels
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
UpperCAmelCase__ : Optional[Any] = self.embedder(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = self.pooler(UpperCamelCase_ )
return embedding
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 2 ):
super().__init__()
UpperCAmelCase__ : int = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 , stride=UpperCamelCase_ , bias=UpperCamelCase_ )
UpperCAmelCase__ : List[str] = nn.BatchNormad(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = self.convolution(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = self.normalization(UpperCamelCase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , UpperCamelCase_ = "relu" ):
super().__init__()
UpperCAmelCase__ : Any = in_channels != out_channels or stride != 1
UpperCAmelCase__ : str = (
ResNetShortCut(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : Optional[int] = nn.Sequential(
ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , activation=UpperCamelCase_ ) , )
UpperCAmelCase__ : List[str] = ACTaFN[activation]
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : str = hidden_state
UpperCAmelCase__ : str = self.layer(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = self.shortcut(UpperCamelCase_ )
hidden_state += residual
UpperCAmelCase__ : str = self.activation(UpperCamelCase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , UpperCamelCase_ = "relu" , UpperCamelCase_ = 4 ):
super().__init__()
UpperCAmelCase__ : Optional[int] = in_channels != out_channels or stride != 1
UpperCAmelCase__ : int = out_channels // reduction
UpperCAmelCase__ : Dict = (
ResNetShortCut(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : List[Any] = nn.Sequential(
ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ ) , )
UpperCAmelCase__ : List[Any] = ACTaFN[activation]
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Dict = hidden_state
UpperCAmelCase__ : Optional[int] = self.layer(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = self.shortcut(UpperCamelCase_ )
hidden_state += residual
UpperCAmelCase__ : str = self.activation(UpperCamelCase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , ):
super().__init__()
UpperCAmelCase__ : str = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
UpperCAmelCase__ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ , activation=config.hidden_act ) , *[layer(UpperCamelCase_ , UpperCamelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : List[Any] = input
for layer in self.layers:
UpperCAmelCase__ : List[str] = layer(UpperCamelCase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : List[str] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
UpperCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase__ : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCamelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , depth=UpperCamelCase_ ) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = True ):
UpperCAmelCase__ : List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Tuple = hidden_states + (hidden_state,)
UpperCAmelCase__ : int = stage_module(UpperCamelCase_ )
if output_hidden_states:
UpperCAmelCase__ : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCamelCase_ , hidden_states=UpperCamelCase_ , )
class a ( lowercase ):
UpperCamelCase : int = ResNetConfig
UpperCamelCase : Optional[int] = """resnet"""
UpperCamelCase : List[Any] = """pixel_values"""
UpperCamelCase : Dict = True
def __snake_case ( self , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(UpperCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : int = value
UpperCamelCase__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCamelCase__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , lowercase , )
class a ( lowercase ):
def __init__( self , UpperCamelCase_ ):
super().__init__(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = config
UpperCAmelCase__ : Union[str, Any] = ResNetEmbeddings(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = ResNetEncoder(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None ):
UpperCAmelCase__ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Union[str, Any] = self.embedder(UpperCamelCase_ )
UpperCAmelCase__ : int = self.encoder(
UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ )
UpperCAmelCase__ : Any = encoder_outputs[0]
UpperCAmelCase__ : List[str] = self.pooler(UpperCamelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowercase , )
class a ( lowercase ):
def __init__( self , UpperCamelCase_ ):
super().__init__(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = config.num_labels
UpperCAmelCase__ : Optional[int] = ResNetModel(UpperCamelCase_ )
# classification head
UpperCAmelCase__ : str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __snake_case ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
UpperCAmelCase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : List[str] = self.resnet(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ )
UpperCAmelCase__ : int = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : Optional[Any] = self.classifier(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase__ : List[str] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase__ : Any = 'single_label_classification'
else:
UpperCAmelCase__ : int = 'multi_label_classification'
if self.config.problem_type == "regression":
UpperCAmelCase__ : List[str] = MSELoss()
if self.num_labels == 1:
UpperCAmelCase__ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase__ : Dict = loss_fct(UpperCamelCase_ , UpperCamelCase_ )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss()
UpperCAmelCase__ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase__ : Union[str, Any] = BCEWithLogitsLoss()
UpperCAmelCase__ : Optional[int] = loss_fct(UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
UpperCAmelCase__ : Union[str, Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , lowercase , )
class a ( lowercase , lowercase ):
def __init__( self , UpperCamelCase_ ):
super().__init__(UpperCamelCase_ )
super()._init_backbone(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = [config.embedding_size] + config.hidden_sizes
UpperCAmelCase__ : Tuple = ResNetEmbeddings(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = ResNetEncoder(UpperCamelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
@replace_return_docstrings(output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None ):
UpperCAmelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Optional[int] = self.embedder(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.encoder(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = outputs.hidden_states
UpperCAmelCase__ : List[Any] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCAmelCase__ : Optional[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=UpperCamelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=UpperCamelCase_ , )
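

# Minimal usage sketch for the classification model above (uses the same checkpoint that
# the docstring constants in this file reference; `image` is any PIL image):
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]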
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase ( _snake_case ):
def wrapper(*_snake_case ,**_snake_case ):
UpperCAmelCase__ : str = timeit.default_timer()
UpperCAmelCase__ : Dict = func(*_snake_case ,**_snake_case )
UpperCAmelCase__ : Dict = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : Dict = func.__name__
return wrapper
def lowerCamelCase ( _snake_case ,_snake_case=100 ,_snake_case=None ):
UpperCAmelCase__ : int = []
UpperCAmelCase__ : List[Any] = seq_shapes or {}
for i in range(_snake_case ):
UpperCAmelCase__ : Tuple = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_snake_case ,_ArrayXD ):
UpperCAmelCase__ : Union[str, Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_snake_case ,datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : List[Any] = 'The small grey turtle was surprisingly fast when challenged.'
else:
UpperCAmelCase__ : List[str] = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(_snake_case ,datasets.Sequence ):
while isinstance(_snake_case ,datasets.Sequence ):
UpperCAmelCase__ : str = v.feature
UpperCAmelCase__ : Optional[Any] = seq_shapes[k]
UpperCAmelCase__ : Union[str, Any] = np.random.rand(*_snake_case ).astype(v.dtype )
UpperCAmelCase__ : str = data
dummy_data.append((i, example) )
return dummy_data
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case=100 ,_snake_case=None ):
UpperCAmelCase__ : Any = generate_examples(_snake_case ,num_examples=_snake_case ,seq_shapes=_snake_case )
with ArrowWriter(features=_snake_case ,path=_snake_case ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : int = features.encode_example(_snake_case )
writer.write(_snake_case )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
UpperCAmelCase__ : str = datasets.Dataset.from_file(filename=_snake_case ,info=datasets.DatasetInfo(features=_snake_case ) )
return dataset
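

# Minimal usage sketch (feature spec and path are illustrative):
#
#     import datasets
#
#     features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#     dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#     assert len(dataset) == 100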
"""WIKI_SPLIT metric: the combination of SARI, exact match and sacreBLEU."""

import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year = {2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
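

# Illustrative unigram example for the decomposition above (single reference, numref == 1):
#   source     s = ["the", "cat", "sat"]
#   prediction c = ["a", "cat", "sat"]
#   reference  r = [["the", "cat", "slept"]]
# KEEP looks at n-grams the prediction kept from the source ({"cat", "sat"}) and rewards
# the ones the reference also kept ({"cat"}); DELETION looks at source n-grams the
# prediction dropped ({"the"}) and rewards drops the reference also made (none here,
# since the reference keeps "the"); ADDITION looks at n-grams the prediction introduced
# ({"a"}) and rewards the ones that appear in the reference (none here).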
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset to allow using space
    # to split the sentence; we do it for all inputs for consistency.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    """Combined SARI, sacreBLEU, and exact-match metric for split-and-rephrase evaluation."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
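# Hedged usage sketch (not part of the original metric file): scoring one
# simplification against two references with the helpers defined above. The
# sentences are made up, and the module-level imports defined earlier in this
# file (sacrebleu, sacremoses, datasets) are assumed to be present.
if __name__ == "__main__":
    example_sources = ["About 95 species are currently accepted ."]
    example_predictions = ["About 95 species are currently known ."]
    example_references = [["About 95 species are currently known .", "About 95 species are now accepted ."]]
    print(compute_sari(example_sources, example_predictions, example_references))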
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n    year={{2020}},\n    eprint={{2006.10369}},\n    archivePrefix={{arXiv}},\n    primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
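# Hedged usage note (assumption: this script lives under scripts/fsmt/ in a
# transformers checkout, which is what the parent.parent.parent walk above implies).
# Run it from anywhere inside the repo, e.g.:
#
#   python scripts/fsmt/gen-card-allenai-wmt16.py
#
# It writes one model_cards/allenai/<model_name>/README.md per model listed above.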
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
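# Hedged standalone sketch (not part of the test file): running the same checkpoint
# outside the test harness and printing detections above a score threshold. This
# mirrors the integration test above; the image path is the test fixture used there.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    detector = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    enc = processor(images=img, return_tensors="pt")
    with torch.no_grad():
        out = detector(**enc)
    detections = processor.post_process_object_detection(out, threshold=0.9, target_sizes=[img.size[::-1]])[0]
    for score, label, box in zip(detections["scores"], detections["labels"], detections["boxes"]):
        print(f"{detector.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")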
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor([[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
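# Hedged usage sketch (not part of the test file): the same mask-filling flow via
# the pipeline API, which wraps the tokenizer/model pair used in the test above.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
    print(unmasker("the [MASK] of Belgium is Brussels", top_k=1))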
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
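# Hedged usage note (sketch, not part of the module): the lazy-module indirection
# above means none of the heavy torch/vision submodules are imported until one of
# the exported names is first touched, e.g.:
#
#   from transformers.models.owlvit import OwlViTConfig  # cheap
#   from transformers.models.owlvit import OwlViTForObjectDetection  # triggers torch import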
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
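# Hedged sketch (assumption: `torchsde` is not installed): any attempt to
# instantiate the placeholder raises through `requires_backends`, telling the
# user exactly which backends are missing.
if __name__ == "__main__":
    try:
        DPMSolverSDEScheduler()
    except ImportError as err:
        print(err)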
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
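# Minimal sketch (not in the original test module): run the check directly when
# the file is executed instead of being collected by pytest.
if __name__ == "__main__":
    test_prim_successful_result()
    print("Prim's MST contains every expected edge.")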
| 716 | """simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = 42
a = 42
class lowerCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a = 1
@register_to_config
def __init__( self : Tuple , _snake_case : int = 2000 , _snake_case : float = 0.15 , _snake_case : float = 0.01 , _snake_case : float = 1348.0 , _snake_case : float = 1e-5 , _snake_case : int = 1 , ) -> str:
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ = sigma_max
# setable values
SCREAMING_SNAKE_CASE__ = None
self.set_sigmas(_snake_case , _snake_case , _snake_case , _snake_case )
def lowerCAmelCase_ ( self : Tuple , _snake_case : torch.FloatTensor , _snake_case : Optional[int] = None ) -> torch.FloatTensor:
return sample
def lowerCAmelCase_ ( self : int , _snake_case : int , _snake_case : float = None , _snake_case : Union[str, torch.device] = None ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE__ = torch.linspace(1 , _snake_case , _snake_case , device=_snake_case )
def lowerCAmelCase_ ( self : List[Any] , _snake_case : int , _snake_case : float = None , _snake_case : float = None , _snake_case : float = None ) -> str:
SCREAMING_SNAKE_CASE__ = sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE__ = sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE__ = torch.exp(torch.linspace(math.log(_snake_case ) , math.log(_snake_case ) , _snake_case ) )
SCREAMING_SNAKE_CASE__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCAmelCase_ ( self : Any , _snake_case : Optional[Any] , _snake_case : List[str] ) -> List[Any]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCAmelCase_ ( self : str , _snake_case : torch.FloatTensor , _snake_case : int , _snake_case : torch.FloatTensor , _snake_case : Optional[torch.Generator] = None , _snake_case : bool = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
SCREAMING_SNAKE_CASE__ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE__ = timesteps.to(self.discrete_sigmas.device )
SCREAMING_SNAKE_CASE__ = self.discrete_sigmas[timesteps].to(sample.device )
SCREAMING_SNAKE_CASE__ = self.get_adjacent_sigma(_snake_case , _snake_case ).to(sample.device )
SCREAMING_SNAKE_CASE__ = torch.zeros_like(_snake_case )
SCREAMING_SNAKE_CASE__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ = diffusion.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE__ = randn_tensor(
sample.shape , layout=sample.layout , generator=_snake_case , device=sample.device , dtype=sample.dtype )
SCREAMING_SNAKE_CASE__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_snake_case , prev_sample_mean=_snake_case )
def lowerCAmelCase_ ( self : List[Any] , _snake_case : torch.FloatTensor , _snake_case : torch.FloatTensor , _snake_case : Optional[torch.Generator] = None , _snake_case : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE__ = randn_tensor(sample.shape , layout=sample.layout , generator=_snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE__ = step_size.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = sample + step_size * model_output
SCREAMING_SNAKE_CASE__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_snake_case )
def lowerCAmelCase_ ( self : List[str] , _snake_case : torch.FloatTensor , _snake_case : torch.FloatTensor , _snake_case : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
SCREAMING_SNAKE_CASE__ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
SCREAMING_SNAKE_CASE__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_snake_case ) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE__ = noise + original_samples
return noisy_samples
def __len__( self : List[Any] ) -> int:
return self.config.num_train_timesteps
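# Hedged sampling sketch (not part of the scheduler module): a bare predictor-
# corrector loop showing how `step_correct` and `step_pred` are interleaved.
# `score_model` below is a stand-in for any trained score network s_theta(x, sigma).
if __name__ == "__main__":
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(10)
    scheduler.set_sigmas(10)

    def score_model(x, sigma):  # placeholder score network
        return torch.zeros_like(x)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
        # corrector steps (Langevin dynamics)
        for _ in range(scheduler.config.correct_steps):
            model_output = score_model(sample, sigma_t)
            sample = scheduler.step_correct(model_output, sample).prev_sample
        # predictor step (reverse SDE)
        model_output = score_model(sample, sigma_t)
        output = scheduler.step_pred(model_output, t, sample)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean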
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
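# Hedged usage sketch (not part of the test file): embedding a question with the
# DPR question encoder the same way the integration test does, but through the
# tokenizer API. DPR retrieval then ranks passages by dot product against such
# embeddings.
if __name__ == "__main__":
    from transformers import DPRQuestionEncoderTokenizer

    tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    question_emb = encoder(**tokenizer("hello, is my dog cute?", return_tensors="tf")).pooler_output
    print(question_emb.shape)  # (1, 768)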
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for LRU Cache."""

    def __init__(self) -> None:
        # sentinel head/rear nodes so add/remove never deal with empty ends
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node; returns None if it is detached."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the key and bumps it to most-recently used."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the key and updates the double linked list."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
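# Hedged usage sketch (not part of the original module): memoising a naive
# recursive Fibonacci with the class-level decorator defined above. The capacity
# of 100 is arbitrary; recursion goes through the wrapper, so subcalls hit the cache.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


if __name__ == "__main__":
    print(fib(20))  # 6765, each fib(k) computed at most once
    print(fib.cache_info())  # hit/miss statistics for the run above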
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
def test_fp16_casting( self ) -> None:
pipe = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
outputs = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(outputs , list )
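# Illustrative detail (added note, not the pipeline's verbatim code): the cast
# the comment above alludes to typically looks like
#   logits = outputs.logits.float()  # fp16 -> fp32 before numpy postprocessing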
@slow
@require_torch
def test_large_model_pt( self ) -> None:
unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(unmasker )
@slow
@require_tf
def test_large_model_tf( self ) -> None:
unmasker = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(unmasker )
def run_large_test( self , unmasker ):
outputs = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(outputs ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
SCREAMING_SNAKE_CASE__ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(_snake_case ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
SCREAMING_SNAKE_CASE__ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(_snake_case ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def test_model_no_pad_pt( self ) -> None:
unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker , [] )
@require_tf
def test_model_no_pad_tf( self ) -> None:
unmasker = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker , [] )
def get_test_pipeline( self , model , tokenizer , processor ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
examples = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def run_pipeline_test( self , fill_masker , examples ):
tokenizer = fill_masker.tokenizer
model = fill_masker.model
outputs = fill_masker(
F"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
outputs = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
outputs = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
outputs , [
[
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
],
[
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
],
] , )
with self.assertRaises(ValueError ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(PipelineException ):
fill_masker("This is" )
self.run_test_top_k(model , tokenizer )
self.run_test_targets(model , tokenizer )
self.run_test_top_k_targets(model , tokenizer )
self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
self.fill_mask_with_multiple_masks(model , tokenizer )
def run_test_targets( self , model , tokenizer ):
vocab = tokenizer.get_vocab()
targets = sorted(vocab.keys() )[:2]
# Pipeline argument
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , target_ids )
processed_targets = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets ) )
# Call argument
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=targets )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , target_ids )
processed_targets = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(processed_targets ) )
# Score equivalence
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=targets )
tokens = [top_mask["token_str"] for top_mask in outputs]
scores = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(tokens ) == set(targets ):
unmasked_targets = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=tokens )
target_scores = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
# Raises with invalid
with self.assertRaises(ValueError ):
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(ValueError ):
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[""] )
with self.assertRaises(ValueError ):
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets="" )
def run_test_top_k( self , model , tokenizer ):
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs_2 = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
outputs_2 , [
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
] , )
self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_2 ) )
def run_test_top_k_targets( self , model , tokenizer ):
vocab = tokenizer.get_vocab()
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
# top_k=2, ntargets=3
targets = sorted(vocab.keys() )[:3]
outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
# If we use the most probable targets, and filter differently, we should still
# have the same results
targets_2 = [el["token_str"] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(targets_2 ).issubset(targets ):
outputs_2 = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets_2 )
# They should yield exactly the same result
self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_2 ) )
def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = sorted(vocab.keys() )[:3]
targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
outputs = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
# The target list contains duplicates, so we can't output more
# results than the number of unique targets
self.assertEqual(len(outputs ) , 3 )
def fill_mask_with_multiple_masks( self , model , tokenizer ):
fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
outputs = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
outputs , [
[
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
],
[
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
],
[
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
{"sequence": ANY(str ), "score": ANY(float ), "token": ANY(int ), "token_str": ANY(str )},
],
] , )
| 538 | """simple docstring"""
from math import pi
def arc_length ( angle : int , radius : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
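# Worked example (added for illustration): a 90-degree arc of radius 10 covers
# a quarter of the circumference, so arc_length(90, 10) == 2 * pi * 10 * (90 / 360)
# == 5 * pi ≈ 15.70796.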
| 538 | 1 |
"""simple docstring"""
def or_gate (input_1 : int , input_2 : int ) -> int:
return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate ():
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
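# Illustrative composition (added; not in the original module): wider OR gates
# can be built by chaining the 2-input gate.
def or_gate_3 (input_1 : int , input_2 : int , input_3 : int ) -> int:
return or_gate(or_gate(input_1, input_2), input_3)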
| 4 |
'''simple docstring'''
def is_power_of_two ( number : int ) -> bool:
"""
Check whether a non-negative integer is a power of two via the bitwise trick.
>>> is_power_of_two(8)
True
>>> is_power_of_two(6)
False
"""
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
"""simple docstring"""
def __init__( self , **kwargs ) -> None:
super().__init__(**kwargs )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
def __call__( self , image , candidate_labels = None , **kwargs , ):
if "text_queries" in kwargs:
candidate_labels = kwargs.pop("""text_queries""" )
if isinstance(image , (str, Image.Image) ):
inputs = {"""image""": image, """candidate_labels""": candidate_labels}
else:
inputs = image
results = super().__call__(inputs , **kwargs )
return results
def _sanitize_parameters( self , **kwargs ):
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params["""threshold"""] = kwargs["""threshold"""]
if "top_k" in kwargs:
postprocess_params["""top_k"""] = kwargs["""top_k"""]
return {}, {}, postprocess_params
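# Note (added): ChunkPipeline routes the three returned dicts to preprocess,
# _forward and postprocess respectively; only postprocessing is configurable
# here, so the first two stay empty.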
def preprocess( self , inputs ):
image = load_image(inputs["""image"""] )
candidate_labels = inputs["""candidate_labels"""]
if isinstance(candidate_labels , str ):
candidate_labels = candidate_labels.split(""",""" )
# dtype restored from the garbled `torch.intaa`; int32 is assumed here
target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(candidate_labels ):
text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
image_features = self.image_processor(image , return_tensors=self.framework )
yield {
"is_last": i == len(candidate_labels ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
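# Note (added): preprocess yields one chunk per candidate label; the `is_last`
# flag tells postprocessing when all label-wise results have arrived and can
# be aggregated into a single detection list.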
def _forward( self , model_inputs ):
target_size = model_inputs.pop("""target_size""" )
candidate_label = model_inputs.pop("""candidate_label""" )
is_last = model_inputs.pop("""is_last""" )
outputs = self.model(**model_inputs )
model_outputs = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
results = []
for model_output in model_outputs:
label = model_output["""candidate_label"""]
model_output = BaseModelOutput(model_output )
outputs = self.image_processor.post_process_object_detection(
outputs=model_output , threshold=threshold , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
score = outputs["""scores"""][index].item()
box = self._get_bounding_box(outputs["""boxes"""][index][0] )
results.append({"""score""": score, """label""": label, """box""": box} )
results = sorted(results , key=lambda x : x["score"] , reverse=True )
if top_k:
results = results[:top_k]
return results
def _get_bounding_box( self , box ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
xmin , ymin , xmax , ymax = box.int().tolist()
bbox = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 128 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 128 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
"""simple docstring"""
def __init__( self, parent, batch_size=1_3, image_size=3_2, patch_size=2, num_channels=3, embed_dim=1_6, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=1_0, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ) -> None:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def create_and_check_model( self, config, pixel_values, labels ):
"""simple docstring"""
model = MaskFormerSwinModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def create_and_check_backbone( self, config, pixel_values, labels ):
"""simple docstring"""
model = MaskFormerSwinBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(ValueError ):
config.out_features = ["stem"]
model = MaskFormerSwinBackbone(config=config )
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
fx_compatible = False
test_torchscript = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ) -> None:
"""simple docstring"""
self.model_tester = MaskFormerSwinModelTester(self )
self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
def test_model( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_backbone( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def test_model_common_attributes( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear ) )
def test_forward_signature( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def check_hidden_states_output( self, inputs_dict, config, model_class, image_size ):
"""simple docstring"""
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ), expected_num_layers )
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def test_hidden_states_output( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
def test_hidden_states_output_with_padding( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def test_model_outputs_equivalence( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
with torch.no_grad():
tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
def recursive_check(tuple_object , dict_object ):
if isinstance(tuple_object , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif isinstance(tuple_object , Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'''
F''' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'''
) , )
recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest (unittest.TestCase , BackboneTesterMixin ):
"""simple docstring"""
all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
config_class = MaskFormerSwinConfig
def setUp( self ) -> None:
"""simple docstring"""
self.model_tester = MaskFormerSwinModelTester(self )
def test_backbone_outputs( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict, output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict, output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 663 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
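# Note (added): this TYPE_CHECKING / _LazyModule split is the standard
# transformers pattern: static type checkers see real imports, while at
# runtime heavy submodules load lazily on first attribute access.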
| 704 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest ( TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
tokenizer_class = MBart50Tokenizer
rust_tokenizer_class = MBart50TokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id( self ):
token = "<s>"
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(vocab_keys ) , 10_54 )
def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def test_full_tokenizer( self ):
tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=True )
tokens = tokenizer.tokenize("This is a test" )
self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def snake_case ( self ):
# fmt: off
__lowerCAmelCase = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def snake_case ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a )
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__lowerCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] ="""facebook/mbart-large-50-one-to-many-mmt"""
__UpperCAmelCase : str =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
expected_src_tokens = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def setUpClass( cls ):
cls.tokenizer = MBart50Tokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
cls.pad_token_id = 1
return cls
def test_language_codes( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 )
def test_tokenizer_batch_encode_plus( self ):
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def test_tokenizer_decode_ignores_language_codes( self ):
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_romanian )
self.assertNotIn(self.tokenizer.eos_token , result )
def test_tokenizer_truncation( self ):
src_text = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , str )
desired_max_length = 10
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
self.assertEqual(ids[0] , EN_CODE )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(ids ) , desired_max_length )
def test_mask_token( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] )
def test_special_tokens_unaffacted_by_save_load( self ):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBart50Tokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def test_batch_fairseq_parity( self ):
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
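# Note (added): MBart's shift_tokens_right rotates the last non-pad token
# (EOS, id 2) to position 0, which is why decoder_input_ids start with
# [2, RO_CODE] while labels keep [RO_CODE, ..., 2].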
@require_torch
def test_tokenizer_prepare_batch( self ):
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def test_seq2seq_max_target_length( self ):
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def test_tokenizer_translation( self ):
inputs = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[25_00_04, 62, 30_34, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
| 282 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_ernie"""] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 385 |
from PIL import Image
def change_brightness ( img : Image , level : float ):
'''simple docstring'''
def brightness(c : int ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(brightness )
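# Example of the mapping (added note): with level=100 a mid-gray pixel c=128
# becomes 128 + 100 + (128 - 128) = 228; PIL clamps lookup-table results to
# the valid 0-255 range.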
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
__lowerCamelCase : str = change_brightness(img, 1_00)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 385 | 1 |
import torch

from diffusers import DiffusionPipeline


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # deterministic output used by tests: cancels to zero, then adds ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
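# A minimal usage sketch (the tiny UNet configuration below is illustrative, not a
# trained model): the pipeline only needs a UNet and a scheduler, so a randomly
# initialised UNet2DModel is enough to exercise the __call__ path end to end.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler()
    pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
    result = pipeline()  # an all-ones tensor by construction of __call__
    print(result.shape)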
| 720 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
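# A minimal usage sketch (requires network access; the image URL is the standard
# COCO demo asset, not part of this module): classify an image with the pretrained
# microsoft/resnet-50 checkpoint referenced in the docstrings above.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor, ResNetForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tiger cat"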
| 189 | 0 |
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoised chain lengths

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
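# Worked example: the chain for 9 is 9 -> 28 -> 14 -> 7 -> ... -> 1 (length 20, the
# longest below 10), and the well-known answer for a limit of 1,000,000 is 837799.
assert solution(10) == 9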
if __name__ == "__main__":
print(solution(int(input().strip())))
| 105 | '''simple docstring'''
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
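# Worked example: for 100 VA of apparent power at a power factor of 0.9 the real
# power is 100 * 0.9 = 90 W and the reactive power is 100 * sqrt(1 - 0.81) ~= 43.589 VAR.
assert real_power(100, 0.9) == 90.0
assert round(reactive_power(100, 0.9), 3) == 43.589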
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
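# Worked example (a minimal sanity check): a message made only of the letter "E"
# puts "E" at the front of the recovered frequency order; full English text tends
# toward the maximum match score of 12 (six common plus six uncommon letters).
assert get_frequency_order("EEEEe")[0] == "E"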
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
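# Example invocation (a sketch using the script's default checkpoint URL):
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten
#
# The converted folder can then be reloaded with the standard API:
#
#   model = VisionEncoderDecoderModel.from_pretrained("./trocr-base-handwritten")
#   processor = TrOCRProcessor.from_pretrained("./trocr-base-handwritten")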
| 377 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 377 | 1 |
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 651 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 651 | 1 |
"""simple docstring"""
a_ = tuple[float, float, float]
a_ = tuple[float, float, float]
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[str] = end_pointa[0] - end_pointa[0]
__lowercase : str = end_pointa[1] - end_pointa[1]
__lowercase : str = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i
__lowercase : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__lowercase : int = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
return tuple(round(__UpperCamelCase , __UpperCamelCase ) for x in vector ) == (0, 0, 0)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 10 ):
__lowercase : Dict = create_vector(__UpperCamelCase , __UpperCamelCase )
__lowercase : Optional[int] = create_vector(__UpperCamelCase , __UpperCamelCase )
return is_zero_vector(get_ad_vectors_cross(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
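# Worked example: (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one line, so AB x AC is
# the zero vector; nudging the third point off the line breaks collinearity.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))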
| 76 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio into a text prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
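# A minimal usage sketch (the checkpoint names below are illustrative assumptions):
# build the pipeline from a Whisper speech recogniser plus the components of a
# standard Stable Diffusion pipeline.
#
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#   from diffusers import StableDiffusionPipeline
#
#   sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe = SpeechToImagePipeline(
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#       vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
#       unet=sd.unet, scheduler=sd.scheduler,
#       safety_checker=sd.safety_checker, feature_extractor=sd.feature_extractor,
#   )
#   images = pipe(audio_array, sampling_rate=16_000).images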
| 580 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
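# A minimal usage sketch (the label names are illustrative): align the template's
# generic label schema with the concrete ClassLabel carried by a dataset's features.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification(audio_column="audio", label_column="labels")
#   template = template.align_with_features(features)
#   print(template.column_mapping)  # {"audio": "audio", "labels": "labels"}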
| 719 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Heap-backed priority queue with membership tests and in-place priority updates."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an existing item by rebuilding around it
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
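# A minimal usage sketch of the queue's update semantics: re-putting an existing
# item replaces its priority instead of inserting a duplicate.
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 3)
#   pq.put((0, 0), 1)        # updates (0, 0) from priority 5 to 1
#   assert pq.get() == (1, (0, 0))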
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
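
# Note on behavior (derived from the code above): when a path to `goal` is
# found, `do_something` prints the grid and the back-pointer path and then
# terminates the process via sys.exit(); the "No path found to goal" branch
# only runs once every anchor-queue key has reached infinity.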
| 114 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a__ ( A__ ):
def __init__( self :Union[str, Any] , *_lowerCamelCase :List[Any] , **_lowerCamelCase :Optional[int] ):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
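
# Illustrative usage sketch (comment-only, since the relative imports above
# require the surrounding package): constructing the deprecated class still
# works but emits a FutureWarning.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       DeiTFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)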
| 357 |
"""simple docstring"""
from collections.abc import Callable
class a__ :
def __init__( self :Tuple , _lowerCamelCase :Callable | None = None ):
'''simple docstring'''
UpperCamelCase_ : list =[]
# Stores indexes of each item for supporting updates and deletion.
UpperCamelCase_ : dict ={}
# Stores current size of heap.
UpperCamelCase_ : Any =0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
UpperCamelCase_ : List[str] =key or (lambda _lowerCamelCase : x)
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : List[str] =int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[int] =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =self.arr[j], self.arr[i]
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : int =self._left(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self._right(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =i
if left is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Optional[int] =left
if right is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[Any] =right
return valid_parent
def lowerCamelCase_ ( self :Any , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Dict =self._parent(_lowerCamelCase )
while parent is not None and not self._cmp(_lowerCamelCase , _lowerCamelCase ):
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : Dict =parent, self._parent(_lowerCamelCase )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self._get_valid_parent(_lowerCamelCase )
while valid_parent != index:
self._swap(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : int =valid_parent, self._get_valid_parent(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : List[Any] =self.pos_map[item]
UpperCamelCase_ : int =[item, self.key(_lowerCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
if item not in self.pos_map:
return
UpperCamelCase_ : Any =self.pos_map[item]
del self.pos_map[item]
UpperCamelCase_ : Dict =self.arr[self.size - 1]
UpperCamelCase_ : Optional[int] =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_lowerCamelCase )
self._heapify_down(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :int ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_lowerCamelCase )] )
else:
UpperCamelCase_ : str =[item, self.key(_lowerCamelCase )]
UpperCamelCase_ : Optional[int] =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
return self.arr[0] if self.size else None
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : int =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def A_ ( ):
pass
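
# Illustrative usage sketch (hand-checked against the methods above): with the
# default key, larger values rise to the top, i.e. the heap behaves as a max-heap.
def _heap_demo() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    assert h.extract_top() == [7, 37]
    assert h.extract_top() == [5, 34]
    assert h.extract_top() == [6, 31]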
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 | 1 |
import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_lowercase = F"""\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
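
# Result sketch (derived from the loop above): each iteration writes
# model_cards/allenai/<model_name>/README.md relative to the repository root,
# e.g. model_cards/allenai/wmt16-en-de-dist-12-1/README.md.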
| 701 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
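
# Running these tests locally (illustrative; the file path assumes the usual
# transformers repository layout):
#
#   python -m pytest tests/pipelines/test_pipelines_object_detection.py -k "small_model_pt"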
| 572 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
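
# Illustrative usage sketch (comment-only, since it needs a real sentencepiece
# model file; the local path is hypothetical):
#
#   tokenizer = XLMRobertaTokenizer(vocab_file="sentencepiece.bpe.model")
#   ids = tokenizer("Hello world")["input_ids"]
#   # per build_inputs_with_special_tokens, ids start with <s> (0) and end with </s> (2)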
| 582 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON to the given binary file handle, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
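
# Illustrative usage sketch (comment-only; file names are hypothetical). These
# classes back `Dataset.to_json` / `Dataset.from_json`:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   ds2 = JsonDatasetReader("out.jsonl").read()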
| 582 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ="""▁"""
__lowerCAmelCase : str ={"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase : List[str] ={
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
__lowerCAmelCase : Tuple ={
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
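
# Illustrative usage sketch (comment-only; requires the real sentencepiece
# model, and the shown structure reflects the default, non-legacy mode):
#
#   tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   # input_ids = [eng_Latn code] + subword ids + [</s>], per set_src_lang_special_tokens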
| 718 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _A ( lowerCAmelCase ):
snake_case__ : int = 'blenderbot-small'
snake_case__ : Optional[Any] = ['past_key_values']
snake_case__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __lowerCAmelCase=5_0265 , __lowerCAmelCase=512 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="gelu" , __lowerCAmelCase=512 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1 , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = use_cache
lowercase = encoder_layers
lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase = {0: """batch"""}
lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase = {0: """batch""", 1: """decoder_sequence"""}
lowercase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(__lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def A__ ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowercase = super().outputs
else:
lowercase = super(__lowerCAmelCase , self ).outputs
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(__lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Generate decoder inputs
lowercase = seq_length if not self.use_past else 1
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase = dict(**__lowerCAmelCase , **__lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
lowercase = common_inputs["""decoder_input_ids"""].shape[1]
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = decoder_seq_length + 3
lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase )] , dim=1 )
lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase = self.num_layers
lowercase = min(__lowerCAmelCase , __lowerCAmelCase )
lowercase = max(__lowerCAmelCase , __lowerCAmelCase ) - min_num_layers
lowercase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
torch.zeros(__lowerCAmelCase ),
) )
# TODO: test this.
lowercase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__lowerCAmelCase , __lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) )
return common_inputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase = seqlen + 2
lowercase , lowercase = self.num_layers
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = common_inputs["""attention_mask"""].dtype
lowercase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 )
lowercase = [
(torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(__lowerCAmelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """simple docstring"""
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
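# Usage sketch (an assumption, not part of the original file: it presumes the
# methods above mirror transformers' BartOnnxConfig and that the
# facebook/bart-base checkpoint is reachable).
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer, TensorType
    from transformers.models.bart.configuration_bart import BartOnnxConfig

    config = AutoConfig.from_pretrained("facebook/bart-base")
    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
    onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print(sorted(dummy.keys()))  # input_ids / attention_mask plus their decoder_* counterparts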
| 197 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_bridgetower"""] = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bridgetower"""] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
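# Illustrative check (an assumption, not part of the original file: it relies on
# the published transformers package, where attribute access on the lazy module
# triggers the real submodule import only on first use).
if __name__ == "__main__":
    from transformers import BridgeTowerConfig

    print(BridgeTowerConfig().model_type)  # "bridgetower"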
| 477 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
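# Usage sketch (not part of the original file; assumes network access to the Hub):
#     import datasets
#     ds = datasets.load_dataset("imdb", split="train")
#     print(ds[0]["text"][:100], ds[0]["label"])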
del _arrow_dataset, _utils, _deprecated_download_manager
| 585 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=64 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def __lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MPNetForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MPNetForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
def __lowerCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ )
@require_torch
class MPNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_lowerCAmelCase = model(UpperCAmelCase_ )[0]
_lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_lowerCAmelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 491 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp ( self ) -> None:
        """simple docstring"""
        self.block_size = 10
def __lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = [1, 2, 3, 4]
_lowerCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase_ , self.block_size , 0 ) , UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase_ , self.block_size , 0 ) , UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase_ , self.block_size , 0 ) , UpperCAmelCase_ )
def __lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_lowerCAmelCase , _lowerCAmelCase = process_story(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , [] )
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = ''
_lowerCAmelCase , _lowerCAmelCase = process_story(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , [] )
self.assertEqual(UpperCAmelCase_ , [] )
def __lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_lowerCAmelCase , _lowerCAmelCase = process_story(UpperCAmelCase_ )
_lowerCAmelCase = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = ['It was the best of times.']
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_lowerCAmelCase = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase_ , 0 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
_lowerCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase_ , 23 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase_ , 1 ).numpy() , expected.numpy() )
def __lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_lowerCAmelCase = 101
_lowerCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase = compute_token_type_ids(UpperCAmelCase_ , UpperCAmelCase_ )
np.testing.assert_array_equal(UpperCAmelCase_ , UpperCAmelCase_ )
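# Illustrative reading of the expected tensors above (not part of the original
# file): compute_token_type_ids starts each sequence with segment id 1 and flips
# the id whenever the separator id (101 here) is reached, with the separator
# itself taking the new segment, e.g. [1, 101, 3, 4, 101, 6] -> [1, 0, 0, 0, 1, 1].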
| 491 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer):
def __init__( self, __a, __a = 3, __a = 1, __a = 1, __a = "relu", **__a, ):
'''simple docstring'''
super().__init__(**__a)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCAmelCase : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
_lowerCAmelCase : List[str] = tf.keras.layers.ConvaD(
filters=__a, kernel_size=__a, strides=__a, padding="VALID", groups=__a, use_bias=__a, name="convolution", )
_lowerCAmelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")
_lowerCAmelCase : List[Any] = ACTaFN[activation] if activation is not None else tf.identity
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convolution(self.padding(__a))
_lowerCAmelCase : str = self.normalization(__a)
_lowerCAmelCase : Optional[Any] = self.activation(__a)
return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer):
def __init__( self, __a, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = config.num_channels
_lowerCAmelCase : Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = shape_list(__a)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCAmelCase : int = tf.transpose(__a, perm=(0, 2, 3, 1))
_lowerCAmelCase : Tuple = self.embedder(__a)
return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer):
def __init__( self, __a, __a = 2, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Any = tf.keras.layers.ConvaD(
filters=__a, kernel_size=1, strides=__a, use_bias=__a, name="convolution")
_lowerCAmelCase : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")
def snake_case__ ( self, __a, __a = False):
'''simple docstring'''
return self.normalization(self.convolution(__a), training=__a)
class TFRegNetSELayer ( tf.keras.layers.Layer):
def __init__( self, __a, __a, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__a, name="pooler")
_lowerCAmelCase : Any = [
tf.keras.layers.ConvaD(filters=__a, kernel_size=1, activation="relu", name="attention.0"),
tf.keras.layers.ConvaD(filters=__a, kernel_size=1, activation="sigmoid", name="attention.2"),
]
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.pooler(__a)
for layer_module in self.attention:
_lowerCAmelCase : Any = layer_module(__a)
_lowerCAmelCase : Optional[int] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer):
def __init__( self, __a, __a, __a, __a = 1, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = in_channels != out_channels or stride != 1
_lowerCAmelCase : Dict = max(1, out_channels // config.groups_width)
_lowerCAmelCase : Tuple = (
TFRegNetShortCut(__a, stride=__a, name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear", name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCAmelCase : List[Any] = [
TFRegNetConvLayer(__a, kernel_size=1, activation=config.hidden_act, name="layer.0"),
TFRegNetConvLayer(
__a, stride=__a, groups=__a, activation=config.hidden_act, name="layer.1"),
TFRegNetConvLayer(__a, kernel_size=1, activation=__a, name="layer.2"),
]
_lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : str = layer_module(__a)
_lowerCAmelCase : Tuple = self.shortcut(__a)
hidden_state += residual
_lowerCAmelCase : str = self.activation(__a)
return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer):
def __init__( self, __a, __a, __a, __a = 1, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Any = max(1, out_channels // config.groups_width)
_lowerCAmelCase : int = (
TFRegNetShortCut(__a, stride=__a, name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear", name="shortcut")
)
_lowerCAmelCase : List[str] = [
TFRegNetConvLayer(__a, kernel_size=1, activation=config.hidden_act, name="layer.0"),
TFRegNetConvLayer(
__a, stride=__a, groups=__a, activation=config.hidden_act, name="layer.1"),
TFRegNetSELayer(__a, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
TFRegNetConvLayer(__a, kernel_size=1, activation=__a, name="layer.3"),
]
_lowerCAmelCase : Any = ACTaFN[config.hidden_act]
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = hidden_state
for layer_module in self.layers:
_lowerCAmelCase : Optional[int] = layer_module(__a)
_lowerCAmelCase : int = self.shortcut(__a)
hidden_state += residual
_lowerCAmelCase : Any = self.activation(__a)
return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer):
def __init__( self, __a, __a, __a, __a = 2, __a = 2, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Any = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_lowerCAmelCase : int = [
# downsampling is done in the first layer with stride of 2
layer(__a, __a, __a, stride=__a, name="layers.0"),
*[layer(__a, __a, __a, name=f"layers.{i+1}") for i in range(depth - 1)],
]
def snake_case__ ( self, __a):
'''simple docstring'''
for layer_module in self.layers:
_lowerCAmelCase : str = layer_module(__a)
return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer):
def __init__( self, __a, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__a, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ))
_lowerCAmelCase : int = zip(config.hidden_sizes, config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(__a, config.depths[1:])):
self.stages.append(TFRegNetStage(__a, __a, __a, depth=__a, name=f"stages.{i+1}"))
def snake_case__ ( self, __a, __a = False, __a = True):
'''simple docstring'''
_lowerCAmelCase : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : str = hidden_states + (hidden_state,)
_lowerCAmelCase : Dict = stage_module(__a)
if output_hidden_states:
_lowerCAmelCase : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=__a, hidden_states=__a)
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self, __a, **__a):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : List[str] = config
_lowerCAmelCase : List[str] = TFRegNetEmbeddings(__a, name="embedder")
_lowerCAmelCase : str = TFRegNetEncoder(__a, name="encoder")
_lowerCAmelCase : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__a, name="pooler")
@unpack_inputs
def snake_case__ ( self, __a, __a = None, __a = None, __a = False, ):
'''simple docstring'''
_lowerCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Optional[int] = self.embedder(__a, training=__a)
_lowerCAmelCase : Optional[int] = self.encoder(
__a, output_hidden_states=__a, return_dict=__a, training=__a)
_lowerCAmelCase : Union[str, Any] = encoder_outputs[0]
_lowerCAmelCase : Dict = self.pooler(__a)
# Change to NCHW output format have uniformity in the modules
_lowerCAmelCase : Union[str, Any] = tf.transpose(__a, perm=(0, 3, 1, 2))
_lowerCAmelCase : Any = tf.transpose(__a, perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCAmelCase : Any = tuple([tf.transpose(__a, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a, pooler_output=__a, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
@property
def snake_case__ ( self):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.floataa)}
_snake_case = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
def __init__( self, __a, *__a, **__a):
'''simple docstring'''
super().__init__(__a, *__a, **__a)
_lowerCAmelCase : Union[str, Any] = TFRegNetMainLayer(__a, name="regnet")
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=__a, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
def snake_case__ ( self, __a, __a = None, __a = None, __a=False, ):
'''simple docstring'''
_lowerCAmelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : List[Any] = self.regnet(
pixel_values=__a, output_hidden_states=__a, return_dict=__a, training=__a, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
def __init__( self, __a, *__a, **__a):
'''simple docstring'''
super().__init__(__a, *__a, **__a)
_lowerCAmelCase : List[Any] = config.num_labels
_lowerCAmelCase : Tuple = TFRegNetMainLayer(__a, name="regnet")
# classification head
_lowerCAmelCase : Optional[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=__a, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def snake_case__ ( self, __a = None, __a = None, __a = None, __a = None, __a=False, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : int = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Tuple = self.regnet(
__a, output_hidden_states=__a, return_dict=__a, training=__a)
_lowerCAmelCase : List[str] = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : int = self.classifier[0](__a)
_lowerCAmelCase : List[str] = self.classifier[1](__a)
_lowerCAmelCase : Optional[int] = None if labels is None else self.hf_compute_loss(labels=__a, logits=__a)
if not return_dict:
_lowerCAmelCase : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__a, logits=__a, hidden_states=outputs.hidden_states)
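# Usage sketch (an assumption, not part of the original file: it presumes the
# classes above mirror transformers' published TFRegNetForImageClassification
# and that the facebook/regnet-y-040 checkpoint is reachable).
if __name__ == "__main__":
    import tensorflow as tf
    from transformers import TFRegNetForImageClassification

    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, as expected by the embedder above
    logits = model(pixel_values).logits                 # shape (1, 1000) for the ImageNet head
    print(int(tf.argmax(logits, axis=-1)[0]))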
| 500 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
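# Quick self-check of the two helpers above (a sketch, not part of the original
# file; the module's relative imports mean it is normally used as a library).
if __name__ == "__main__":
    byte_table = bytes_to_unicode()
    assert len(byte_table) == 256 and byte_table[ord("A")] == "A"
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}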
class BartTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self, __a, __a, __a="replace", __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else bos_token
_lowerCAmelCase : List[str] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else eos_token
_lowerCAmelCase : str = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else sep_token
_lowerCAmelCase : Tuple = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else cls_token
_lowerCAmelCase : List[str] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else unk_token
_lowerCAmelCase : Tuple = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : str = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
super().__init__(
errors=__a, bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, add_prefix_space=__a, **__a, )
with open(__a, encoding="utf-8") as vocab_handle:
_lowerCAmelCase : str = json.load(__a)
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Any = errors # how to handle errors in decoding
_lowerCAmelCase : str = bytes_to_unicode()
_lowerCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(__a, encoding="utf-8") as merges_handle:
_lowerCAmelCase : int = merges_handle.read().split("\n")[1:-1]
_lowerCAmelCase : Union[str, Any] = [tuple(merge.split()) for merge in bpe_merges]
_lowerCAmelCase : List[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.encoder)
def snake_case__ ( self):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder)
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : List[Any] = tuple(__a)
_lowerCAmelCase : int = get_pairs(__a)
if not pairs:
return token
while True:
_lowerCAmelCase : List[Any] = min(__a, key=lambda __a: self.bpe_ranks.get(__a, float("inf")))
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = bigram
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : int = 0
while i < len(__a):
try:
_lowerCAmelCase : Union[str, Any] = word.index(__a, __a)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCAmelCase : List[str] = j
if word[i] == first and i < len(__a) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCAmelCase : Union[str, Any] = tuple(__a)
_lowerCAmelCase : List[str] = new_word
if len(__a) == 1:
break
else:
_lowerCAmelCase : Any = get_pairs(__a)
_lowerCAmelCase : str = " ".join(__a)
_lowerCAmelCase : Tuple = word
return word
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for token in re.findall(self.pat, __a):
_lowerCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a).split(" "))
return bpe_tokens
def snake_case__ ( self, __a):
'''simple docstring'''
return self.encoder.get(__a, self.encoder.get(self.unk_token))
def snake_case__ ( self, __a):
'''simple docstring'''
return self.decoder.get(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = "".join(__a)
_lowerCAmelCase : Any = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
    def snake_case__ ( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Dict = [self.cls_token_id]
_lowerCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
if token_ids_a is None:
return [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1, 1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Any = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def snake_case__ ( self, __a, __a=False, **__a):
'''simple docstring'''
_lowerCAmelCase : str = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(__a) > 0 and not text[0].isspace()):
_lowerCAmelCase : int = " " + text
return (text, kwargs)
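# Usage sketch (an assumption, not part of the original file: it presumes this
# class mirrors transformers' BartTokenizer and its published vocabulary files).
if __name__ == "__main__":
    from transformers import BartTokenizer

    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tok("Hello world!")["input_ids"]
    print(ids)              # with the published vocab: [0, 31414, 232, 328, 2]
    print(tok.decode(ids))  # '<s>Hello world!</s>'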
| 500 | 1 |
"""simple docstring"""
def solution() -> str:
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
| 222 |
"""simple docstring"""
def factorial(digit : int )-> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy(number : int )-> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
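# Worked example (not part of the original file): 145 = 1! + 4! + 5! = 1 + 24 + 120,
# so krishnamurthy(145) is True, while 144 gives 1 + 24 + 24 = 49 and is False.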
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
A_ : Dict =int(input("""Enter number: """).strip())
print(
f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 222 | 1 |
'''simple docstring'''
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__) , '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 274 |
'''simple docstring'''
def sum_of_series(first_term : int , common_diff : int , num_of_terms : int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
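# Worked example (not part of the original file): sum_of_series(1, 1, 10) evaluates
# the closed form (10 / 2) * (2 * 1 + 9 * 1) = 55.0, matching 1 + 2 + ... + 10.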
def main() -> None:
    print(sum_of_series(1 , 1 , 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 | 1 |
def solution(n: int = 1_000 ) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
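# Sanity check (not part of the original file): for n = 12 the loop finds the
# single triplet (3, 4, 5), so solution(12) returns 3 * 4 * 5 = 60; solution()
# answers Project Euler problem 9 for the default perimeter of 1000.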
if __name__ == "__main__":
print(f"{solution() = }")
| 721 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset : datasets.Dataset , length : int ):
    '''simple docstring'''
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset : datasets.Dataset , length : int , batch_size : int ):
    '''simple docstring'''
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset : datasets.Dataset , length : int , type : str ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset : datasets.Dataset , length : int , batch_size : int , type : str ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
    ]
    functions_shuffled = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''' )
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'''list''': (100,)} , )
        print('''first set of iterations''' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('''shuffling dataset''' )
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
        for func, kwargs in functions_shuffled:
            print('''shuffled ''' , func.__name__ , str(kwargs ) )
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 409 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Tuple = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Any = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Union[str, Any] = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Tuple = num_labels
_lowerCAmelCase : str = num_choices
_lowerCAmelCase : Union[str, Any] = scope
_lowerCAmelCase : Any = self.vocab_size - 1
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Optional[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_lowerCAmelCase : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OpenAIGPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase )
_lowerCAmelCase : List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = OpenAIGPTLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[str] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[str] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = OpenAIGPTForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
(
_lowerCAmelCase
) : Any = config_and_inputs
_lowerCAmelCase : Tuple = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def a ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase , )
_lowerCAmelCase : Dict = inputs_dict["labels"]
_lowerCAmelCase : Optional[Any] = inputs_dict["labels"]
_lowerCAmelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__UpperCAmelCase , )
_lowerCAmelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
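
The same check can be reproduced interactively; a minimal sketch (it downloads the openai-gpt checkpoint on first use):

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test above
print(tokenizer.decode(output_ids[0]))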
| 444 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCamelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
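
For reference, a typical invocation converts one slow tokenizer class for one checkpoint and keeps only the generated tokenizer.json files (the script filename and output directory are illustrative):

    python convert_slow_tokenizers_checkpoints_to_fast.py \
        --tokenizer_name BertTokenizer \
        --checkpoint_name bert-base-uncased \
        --dump_path ./fast_tokenizers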
| 191 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
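
A minimal usage sketch (assuming the class keeps the ImageCaptioningTool name above; the image path is illustrative):

from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))  # English description of the image
print(caption)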
| 616 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ : Tuple = logging.getLogger(__name__)
A_ : Tuple = "Hello world! cécé herlolip"
A_ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = mask_tgt = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, clss, mask_cls
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
A_ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
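
The script would be run along these lines (the file name and both paths are illustrative):

    python convert_bertabs_original_pytorch_dump.py \
        --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
        --pytorch_dump_folder_path ./bertabs-converted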
| 616 | 1 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator d <= `digit` for which the unit fraction
    numerator/d has the longest recurring cycle in its decimal part
    (Project Euler problem 26). The cycle is detected by tracking the
    remainders of the long division until one repeats."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
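
With the defaults this is Project Euler 26; the denominator below 1000 with the longest recurring cycle is 983. The small case is easy to verify by hand, since 1/7 = 0.(142857) has the longest cycle (length 6) among denominators up to 10:

assert solution(1, 10) == 7
assert solution(1, 1000) == 983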
| 513 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
[
[
[-2.4736526E07, 8.2691656E04, 1.6521838E05],
[-5.7541704E-01, 3.9056022E00, 4.4011507E00],
[2.6047359E00, 1.5677652E00, -1.7324188E-01],
]
            ], device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
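
The divide-then-bound trick above generalizes to any comparison across values spanning many orders of magnitude; a minimal standalone sketch (the helper name is illustrative):

import torch


def within_relative_tolerance(expected: torch.Tensor, actual: torch.Tensor, tol: float = 1e-3) -> bool:
    # A ratio close to 1 means agreement regardless of the values' scale.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))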
| 513 | 1 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 713 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase__ = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
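
End to end, the tokenizer drives translation roughly as follows; a sketch assuming the class is exported as NllbTokenizerFast and paired with the matching seq2seq checkpoint referenced above:

from transformers import AutoModelForSeq2SeqLM

tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])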
| 143 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff the two integers have opposite signs.

    XORing two integers yields a negative result exactly when their sign bits differ.
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
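
For example:

assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-5, -7) is False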
| 565 |
import math


def sieve(n: int) -> list:
    """Segmented sieve of Eratosthenes: return all primes <= n, sieving one
    sqrt(n)-sized segment at a time to keep memory usage low."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
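
A small input makes the segmentation easy to check by eye:

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]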
| 466 | 0 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
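
Downstream code imports schedulers through this package and only hits an error when the optional backend is actually missing; a minimal usage sketch (assumes torch is installed):

from diffusers.schedulers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)  # pick 50 inference steps out of the 1000 training steps
print(scheduler.timesteps[:5])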
| 718 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=2_81_23 ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowercase_ = set()
lowercase_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(__lowerCAmelCase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
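
For reference, the default limit reproduces the published Project Euler 23 answer:

assert solution() == 4179871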
| 100 | 0 |
def odd_even_sort(input_list: list) -> list:
    """Odd-even transposition (brick) sort: alternately compare-and-swap all
    even-indexed and all odd-indexed adjacent pairs until a pass makes no swap.
    A usage check follows the function definition below."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
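
A quick usage check (duplicates are handled like any other element):

assert odd_even_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert odd_even_sort([3, 1, 3, 2]) == [1, 2, 3, 3]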
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowercase__ : Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowercase__ : Union[str, Any] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list) | 8 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 700 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : int = logging.get_logger("""transformers.models.encodec""")
__snake_case : Tuple = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case : List[Any] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case : str = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case : str = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case : Any = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
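# A few illustrative matches for the wildcard rules above (the keys here are made up for the example):
#   should_ignore("encoder.model.2.conv", ["encoder.*"])           -> True   (trailing wildcard)
#   should_ignore("decoder.model.1.lstm.bias", ["decoder.*.lstm"]) -> True   (infix wildcard)
#   should_ignore("quantizer.vq.layers.0", ["encoder.*"])          -> False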
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
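# Example invocation of the script above. The script name follows its usual upstream location,
# and the checkpoint/output paths are placeholders, not real files:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted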
| 365 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # is_decoder=False and use_stable_embedding=True restore boolean literals lost in this copy (assumed tester defaults)
        return OpenLlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
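# For reference, the dict exercised above follows the standard `rope_scaling` schema:
# {"type": "linear", "factor": 10.0} divides the position indices by the factor for every
# input, while {"type": "dynamic", ...} only rescales once the sequence grows past
# `max_position_embeddings`, which is exactly the asymmetry the assertions check.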
| 192 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
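# Sketch of how client code typically drives these helpers (the module path is an
# assumption based on where this file lives upstream, datasets/utils/logging.py):
#
#   from datasets.utils.logging import get_logger, set_verbosity_debug, disable_progress_bar
#
#   set_verbosity_debug()           # root "datasets" logger now emits DEBUG records
#   disable_progress_bar()          # tqdm calls above now return EmptyTqdm no-ops
#   get_logger(__name__).debug("verbose output enabled")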
| 279 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
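# Net effect of the lazy structure above, from the user's side (a sketch):
#
#   import transformers.models.electra     # cheap: only registers _import_structure
#   from transformers import ElectraModel  # first attribute access triggers the real modeling import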
| 709 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE: the attribute name below is an assumption; only the `False` literal survived in this copy
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        # attention_bias=True and norm_elementwise_affine=False restore boolean literals lost in this copy
        transformer = Transformer2DModel(sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
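# Minimal end-user sketch of the API exercised above (the checkpoint id is the real
# facebook/DiT-XL-2-256; the class names are arbitrary ImageNet labels):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_ids, num_inference_steps=25).images[0]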
| 532 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 51 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
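# Usage sketch inside a test body:
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=0.05):
#       ...  # any requests.Session.request in here fails fast instead of hanging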
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker (0 if not under pytest-xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a `--master_port` value unique per pytest-xdist worker, so concurrent runs don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
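# e.g. under `pytest -n 2`, worker "gw1" yields port 29501. A typical (hypothetical) use:
#
#   cmd = ["torchrun", f"--master_port={get_torch_dist_unique_port()}", "some_training_script.py"]
#   execute_subprocess_async(cmd, env=os.environ.copy())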
| 166 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from vertex `src` to every other vertex."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
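# Quick worked example (not part of the interactive flow below): two edges
# 0->1 (weight 2) and 1->2 (weight 3) give distances [0.0, 2.0, 5.0] from vertex 0.
#
#   _example = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
#   assert bellman_ford(_example, 3, 2, 0) == [0.0, 2.0, 5.0]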
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = int(input("Enter number of vertices: ").strip())
_UpperCamelCase = int(input("Enter number of edges: ").strip())
_UpperCamelCase = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
_UpperCamelCase = {"src": src, "dst": dest, "weight": weight}
_UpperCamelCase = int(input("\nEnter shortest path source:").strip())
_UpperCamelCase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 714 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        # sequence lengths derived from the raw sizes above
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average)
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Tuple = prepare_batch()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__lowerCAmelCase : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=A_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Tuple = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__lowerCAmelCase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A_ )
__lowerCAmelCase : Any = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=A_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Dict = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A_ )
__lowerCAmelCase : Dict = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__lowerCAmelCase : Optional[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A_ )
__lowerCAmelCase : Optional[Any] = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=A_ )
__lowerCAmelCase : Tuple = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A_ , rtol=1e-1 ) )
| 583 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple ) ->Any:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : List[str]=True ) ->List[str]:
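    # one forward/backward step; when `do_backward` is False the loss is scaled
    # by the accumulation steps and backward() is called directly, otherwise
    # `accelerator.backward` takes care of the scaling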
model.train()
lowerCamelCase__ : Optional[Any] =model(snake_case_ )
lowerCamelCase__ : Any =F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Any=False ) ->str:
set_seed(4_2 )
lowerCamelCase__ : List[str] =RegressionModel()
lowerCamelCase__ : List[str] =deepcopy(snake_case_ )
lowerCamelCase__ : Dict =RegressionDataset(length=8_0 )
lowerCamelCase__ : List[str] =DataLoader(snake_case_ , batch_size=1_6 )
model.to(accelerator.device )
if sched:
lowerCamelCase__ : Union[str, Any] =AdamW(params=model.parameters() , lr=1E-3 )
lowerCamelCase__ : str =AdamW(params=ddp_model.parameters() , lr=1E-3 )
lowerCamelCase__ : Tuple =LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
lowerCamelCase__ : Optional[Any] =LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
# Make a copy of `model`
if sched:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
lowerCamelCase__ , lowerCamelCase__ : List[Any] =accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : List[Any] ) ->str:
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_training_setup(snake_case_ )
# Use a single batch
lowerCamelCase__ , lowerCamelCase__ : int =next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ , lowerCamelCase__ : int =accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowerCamelCase__ : List[Any] =ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) ->Union[str, Any]:
# Test on distributed setup that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =get_training_setup(snake_case_ )
# Use a single batch
lowerCamelCase__ , lowerCamelCase__ : List[str] =next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ , lowerCamelCase__ : str =accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowerCamelCase__ : str =ddp_input[torch.randperm(len(snake_case_ ) )]
def lowerCAmelCase_ ( snake_case_ : str=False , snake_case_ : Union[str, Any]=False ) ->Tuple:
lowerCamelCase__ : Any =Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
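    # `accelerator.accumulate` only synchronizes gradients on the last
    # micro-batch of each accumulation window (every 2 steps here, or the
    # final batch of the dataloader); the assertions below verify this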
# Test that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str =get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
lowerCamelCase__ , lowerCamelCase__ : int =batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ : int =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # DDP model and model should only be in sync at the end of each accumulation window (every 2 batches) or on the final batch
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowerCamelCase__ : str =ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any]=False , snake_case_ : List[str]=False ) ->Optional[Any]:
lowerCamelCase__ : Dict =Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
lowerCamelCase__ , lowerCamelCase__ : str =batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase__ , lowerCamelCase__ : int =accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase__ , lowerCamelCase__ : Tuple =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
lowerCamelCase__ : List[Any] =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) ->Dict:
lowerCamelCase__ : int =Accelerator()
lowerCamelCase__ : Optional[Any] =RegressionDataset(length=8_0 )
lowerCamelCase__ : List[Any] =DataLoader(snake_case_ , batch_size=1_6 )
lowerCamelCase__ : Dict =RegressionDataset(length=9_6 )
lowerCamelCase__ : Union[str, Any] =DataLoader(snake_case_ , batch_size=1_6 )
lowerCamelCase__ , lowerCamelCase__ : int =accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) ->Tuple:
lowerCamelCase__ : str =Accelerator()
lowerCamelCase__ : Optional[Any] =accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) ->str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
| 174 |
"""simple docstring"""
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) ->list:
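    # naive pattern search: slide the pattern across `s` one position at a
    # time and compare character by character; runs in O(len(s) * len(pattern))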
lowerCamelCase__ : Optional[Any] =len(snake_case_ )
lowerCamelCase__ : Any =[]
for i in range(len(snake_case_ ) - pat_len + 1 ):
lowerCamelCase__ : str =True
for j in range(snake_case_ ):
if s[i + j] != pattern[j]:
lowerCamelCase__ : Optional[int] =False
break
if match_found:
position.append(snake_case_ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 174 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__A =logging.get_logger(__name__)
__A =TypeVar("DatasetType", Dataset, IterableDataset)
def a ( _UpperCAmelCase : List[DatasetType] , _UpperCAmelCase : Optional[List[float]] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[DatasetInfo] = None , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__UpperCAmelCase , __UpperCAmelCase : Dict = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
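    # "first_exhausted" stops as soon as one dataset runs out of samples
    # (undersampling); "all_exhausted" keeps cycling until every dataset has
    # been fully seen at least once (oversampling)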
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
else:
return _interleave_iterable_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
def a ( _UpperCAmelCase : List[DatasetType] , _UpperCAmelCase : Optional[DatasetInfo] = None , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
                f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__UpperCAmelCase , __UpperCAmelCase : Any = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
                f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
else:
return _concatenate_iterable_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
| 241 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self : Optional[int] , a_ : UNetaDModel , a_ : KarrasVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self : Optional[Any] , a_ : int = 1 , a_ : int = 50 , a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Any = self.unet.config.sample_size
__UpperCAmelCase : int = (batch_size, 3, img_size, img_size)
__UpperCAmelCase : Optional[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__UpperCAmelCase : str = randn_tensor(a_ , generator=a_ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__UpperCAmelCase : str = self.scheduler.schedule[t]
__UpperCAmelCase : str = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.scheduler.add_noise_to_input(a_ , a_ , generator=a_ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__UpperCAmelCase : str = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__UpperCAmelCase : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , a_ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__UpperCAmelCase : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__UpperCAmelCase : Optional[Any] = self.scheduler.step_correct(
a_ , a_ , a_ , a_ , step_output.prev_sample , step_output['''derivative'''] , )
__UpperCAmelCase : List[Any] = step_output.prev_sample
__UpperCAmelCase : str = (sample / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : Tuple = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 241 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,_a : List[Any] ,_a : Union[str, Any]=3 ,_a : Any=32 ,_a : List[Any]=3 ,_a : Optional[int]=10 ,_a : List[Any]=[10, 20, 30, 40] ,_a : Optional[Any]=[1, 1, 2, 1] ,_a : Optional[int]=True ,_a : Tuple=True ,_a : Union[str, Any]="relu" ,_a : List[str]=3 ,_a : int=None ,):
'''simple docstring'''
_a : Tuple = parent
_a : List[str] = batch_size
_a : int = image_size
_a : List[str] = num_channels
_a : Tuple = embeddings_size
_a : Dict = hidden_sizes
_a : List[str] = depths
_a : Dict = is_training
_a : Union[str, Any] = use_labels
_a : Dict = hidden_act
_a : List[str] = num_labels
_a : Dict = scope
_a : List[str] = len(lowercase_ )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Dict = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.num_labels )
_a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : List[str] ,_a : Tuple ):
'''simple docstring'''
_a : str = TFResNetModel(config=lowercase_ )
_a : str = model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = self.num_labels
_a : List[str] = TFResNetForImageClassification(lowercase_ )
_a : List[Any] = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = self.prepare_config_and_inputs()
_a, _a, _a : Dict = config_and_inputs
_a : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase : int = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Dict = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = TFResNetModelTester(self )
_a : Optional[Any] = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : int ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(lowercase_ )
_a : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowercase_ )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __lowercase ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_a : str ,_a : List[Any] ,_a : List[str] ):
_a : Dict = model_class(lowercase_ )
_a : int = model(**self._prepare_for_class(lowercase_ ,lowercase_ ) )
_a : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : str = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a : int = layer_type
_a : Any = True
check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ )
                # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_a : Optional[int] = True
check_hidden_states_output(lowercase_ ,lowercase_ ,lowercase_ )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = TFResNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_a : List[str] = self.default_image_processor
_a : Union[str, Any] = prepare_img()
_a : List[str] = image_processor(images=lowercase_ ,return_tensors='tf' )
# forward pass
_a : Union[str, Any] = model(**lowercase_ )
# verify the logits
_a : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowercase_ )
_a : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,lowercase_ ,atol=1E-4 ) )
| 229 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger()
@dataclass
class a_ :
'''simple docstring'''
__a: nn.Module
__a: List[nn.Module] = field(default_factory=a_ )
__a: list = field(default_factory=a_ )
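    # during a forward pass, `_forward_hook` records every executed "leaf"
    # module (one with no submodules, or a Conv2d/BatchNorm2d) in `traced`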
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(lowercase_ , nn.Convad ) or isinstance(lowercase_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowercase_ )
def __call__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowercase_ )
[x.remove() for x in self.handles]
return self
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a_ :
'''simple docstring'''
__a: nn.Module
__a: nn.Module
__a: int = 0
__a: List = field(default_factory=a_ )
__a: List = field(default_factory=a_ )
def __call__( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = Tracker(self.dest )(lowercase_ ).parametrized
lowerCAmelCase_ = Tracker(self.src )(lowercase_ ).parametrized
lowerCAmelCase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.src_skip , lowercase_ ) )
lowerCAmelCase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.dest_skip , lowercase_ ) )
if len(lowercase_ ) != len(lowercase_ ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(lowercase_ )} operations while'''
f''' destination module has {len(lowercase_ )}.''' )
for dest_m, src_m in zip(lowercase_ , lowercase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
def lowerCamelCase ( a_ , a_ , a_ , a_ = True ) -> Optional[Any]:
print(F'''Converting {name}...''' )
with torch.no_grad():
lowerCAmelCase_ = timm.create_model(a_ , pretrained=a_ ).eval()
lowerCAmelCase_ = ResNetForImageClassification(a_ ).eval()
lowerCAmelCase_ = ModuleTransfer(src=a_ , dest=a_ )
lowerCAmelCase_ = torch.randn((1, 3, 224, 224) )
module_transfer(a_ )
assert torch.allclose(from_model(a_ ) , our_model(a_ ).logits ), "The model logits don't match the original one."
lowerCAmelCase_ = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(a_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=a_ , )
# we can use the convnext one
lowerCAmelCase_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=a_ , )
print(F'''Pushed {checkpoint_name}''' )
def lowerCamelCase ( a_ , a_ = None , a_ = True ) -> str:
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = (1, num_labels)
lowerCAmelCase_ = 'huggingface/label-files'
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ )
lowerCAmelCase_ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a_ , a_ , a_ , a_ )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 318 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
snake_case_ : List[Any] = logging.get_logger(__name__)
class __lowerCamelCase ( _snake_case ):
def __init__( self , *__snake_case , **__snake_case ) -> str:
"""simple docstring"""
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
| 713 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCAmelCase ( snake_case_ : int = 3 ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ):
raise TypeError("number of qubits must be a integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(snake_case_ ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 1_0:
raise ValueError("number of qubits too large to simulate(>10)." )
UpperCAmelCase: Union[str, Any] = QuantumRegister(snake_case_ , "qr" )
UpperCAmelCase: str = ClassicalRegister(snake_case_ , "cr" )
UpperCAmelCase: Optional[int] = QuantumCircuit(snake_case_ , snake_case_ )
UpperCAmelCase: Dict = number_of_qubits
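    # standard QFT layout: a Hadamard on each qubit followed by controlled
    # phase rotations of angle pi / 2^k toward the less significant qubits,
    # then swaps that reverse the qubit order before measurement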
for i in range(snake_case_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(snake_case_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , snake_case_ , snake_case_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(snake_case_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(snake_case_ , snake_case_ )
# simulate with 10000 shots
UpperCAmelCase: Optional[Any] = Aer.get_backend("qasm_simulator" )
UpperCAmelCase: List[Any] = execute(snake_case_ , snake_case_ , shots=1_0_0_0_0 )
return job.result().get_counts(snake_case_ )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 166 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A ( UpperCamelCase__ ):
@slow
@require_torch
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__magic_name__ : int =BertTokenizer.from_pretrained("""bert-base-uncased""" )
__magic_name__ : str =bertabert.config.encoder.vocab_size
__magic_name__ : Union[str, Any] =tokenizer.sep_token_id
__magic_name__ : Optional[Any] =tokenizer.cls_token_id
__magic_name__ : str =1_28
__magic_name__ : List[str] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__magic_name__ : Union[str, Any] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__magic_name__ : Union[str, Any] =train_dataset.select(range(32 ) )
__magic_name__ : str =val_dataset.select(range(16 ) )
__magic_name__ : int =4
def _map_to_encoder_decoder_inputs(__snake_case :Optional[int] ):
            # Tokenizer will automatically set [CLS] <text> [SEP]
__magic_name__ : Dict =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__snake_case , max_length=5_12 )
__magic_name__ : Dict =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__snake_case , max_length=1_28 )
__magic_name__ : Optional[int] =inputs.input_ids
__magic_name__ : Tuple =inputs.attention_mask
__magic_name__ : Any =outputs.input_ids
__magic_name__ : Tuple =outputs.input_ids.copy()
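            # mask padding positions in the labels with -100 so they are
            # ignored by the cross-entropy loss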
__magic_name__ : Union[str, Any] =[
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__magic_name__ : List[str] =outputs.attention_mask
assert all(len(__snake_case ) == 5_12 for x in inputs.input_ids )
assert all(len(__snake_case ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(__snake_case :Tuple ):
__magic_name__ : Tuple =pred.label_ids
__magic_name__ : Any =pred.predictions
# all unnecessary tokens are removed
__magic_name__ : Optional[int] =tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : List[Any] =tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : Optional[Any] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(__snake_case ) )] ) / len(__snake_case )
return {"accuracy": accuracy}
# map train dataset
__magic_name__ : Dict =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__snake_case , batch_size=__snake_case , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__magic_name__ : List[str] =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__snake_case , batch_size=__snake_case , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__magic_name__ : Tuple =self.get_auto_remove_tmp_dir()
__magic_name__ : Optional[int] =SeqaSeqTrainingArguments(
output_dir=__snake_case , per_device_train_batch_size=__snake_case , per_device_eval_batch_size=__snake_case , predict_with_generate=__snake_case , evaluation_strategy="""steps""" , do_train=__snake_case , do_eval=__snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__magic_name__ : List[str] =SeqaSeqTrainer(
model=__snake_case , args=__snake_case , compute_metrics=_compute_metrics , train_dataset=__snake_case , eval_dataset=__snake_case , tokenizer=__snake_case , )
# start training
trainer.train()
| 21 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] ):
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
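    # build an output projection layer that shares its weights with the
    # input embedding matrix (weight tying)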
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
__lowercase = emb.weight.data
return lin_layer
def _lowerCAmelCase ( lowerCamelCase_ : Tuple ):
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = Namespace(**checkpoint['''cfg''']['''model'''] )
__lowercase = checkpoint['''model''']
remove_ignore_keys_(lowerCamelCase_ )
__lowercase = state_dict['''decoder.embed_tokens.weight'''].shape[0]
__lowercase = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
__lowercase = XGLMConfig(
vocab_size=lowerCamelCase_ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
__lowercase = XGLMForCausalLM(lowerCamelCase_ )
__lowercase = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
print(lowerCamelCase_ )
__lowercase = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 502 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = OpenAIGPTTokenizer
UpperCamelCase_ = OpenAIGPTTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = False
def A__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : int =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : List[str] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : int =['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def A__ ( self : Tuple , UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
return "lower newer", "lower newer"
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : str =OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : Dict =['''low''', '''er</w>''']
lowercase : int =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tokens + ['''<unk>''']
lowercase : Tuple =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
def A__ ( self : Optional[Any] , UpperCAmelCase : Dict=15 ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase : List[Any] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# Simple input
lowercase : str ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[int] =('''This is a simple input''', '''This is a pair''')
lowercase : List[str] =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
def A__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
pass
| 8 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 1 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions,
        references,
        min_len=1,
        max_len=4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
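# Standalone sanity check (illustrative tokens, not part of the metric): calling
# the wrapped NLTK API directly gives the same number `compute` returns.
# _hyp = ["the", "cat", "sat", "on", "the", "mat"]
# _ref = ["the", "cat", "is", "on", "the", "mat"]
# gleu_score.corpus_gleu(list_of_references=[[_ref]], hypotheses=[_hyp])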
| 88 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
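# Illustrative sketch (not part of the original example): what
# `pad_to_multiple_of=8` buys us. Each batch's padded length is rounded up to a
# multiple of 8 so fp16/bf16 tensor cores see aligned shapes.
def _round_up_to_multiple(length: int, multiple: int) -> int:
    return ((length + multiple - 1) // multiple) * multiple


assert _round_up_to_multiple(13, 8) == 16  # a 13-token batch is padded to 16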
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
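# A minimal custom tracker sketch (illustrative, not part of the original
# example) for the `log_with=["all", MyCustomTrackerClassInstance()]` option
# mentioned above; it assumes the `accelerate.tracking.GeneralTracker` base class.
#
# from accelerate.tracking import GeneralTracker
#
# class PrintTracker(GeneralTracker):
#     name = "print"
#     requires_logging_directory = False
#
#     def store_init_configuration(self, values: dict):
#         print("config:", values)
#
#     def log(self, values: dict, step: int = None):
#         print(f"step {step}:", values)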
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
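    # Typical invocations (flags illustrative):
    #   python tracking.py --with_tracking
    #   accelerate launch tracking.py --mixed_precision fp16 --with_tracking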
| 132 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = """this is a test"""
__UpperCAmelCase : List[Any] = """this is a test"""
return input_text, output_text
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Any=False , UpperCamelCase : Any=20 , UpperCamelCase : Dict=5 ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Dict = self.get_input_output_texts(UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = """<pad>"""
__UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCamelCase ) , 81 )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Dict = tokenizer.vocab_size
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__UpperCAmelCase : Any = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__UpperCAmelCase : Any = tokenizer.add_tokens(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.vocab_size
__UpperCAmelCase : str = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size + len(UpperCamelCase ) )
__UpperCAmelCase : Optional[int] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__UpperCAmelCase : Dict = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__UpperCAmelCase : List[Any] = tokenizer.add_special_tokens(UpperCamelCase )
__UpperCAmelCase : Any = tokenizer.vocab_size
__UpperCAmelCase : int = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size_a + len(UpperCamelCase ) )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCamelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__UpperCAmelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase )
# fmt: off
self.assertListEqual(UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : str = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__UpperCAmelCase : List[str] = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCamelCase , )
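# To run only this test class from a transformers checkout (path illustrative):
#   python -m pytest tests/models/speecht5/test_tokenization_speecht5.py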
| 299 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
UpperCAmelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
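# Example lookup (sketch): the string usually comes from a saved
# `preprocessor_config.json`. With vision deps installed this returns the
# `ViTImageProcessor` class; without them, the dummy object carrying the error.
# image_processor_class_from_name("ViTImageProcessor")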
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    This is a generic image processor class that will be instantiated as one of the image processor classes of the
    library when created with the [`AutoImageProcessor.from_pretrained`] class method. This class cannot be
    instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        r"""
        Instantiate one of the image processor classes of the library from a pretrained model.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        """
        Register a new image processor class for a given configuration class.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
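# Typical end-user call (checkpoint name illustrative):
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")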
| 299 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
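# Quick sanity checks (illustrative key names) of the wildcard rules above:
assert should_ignore("encoder.model.0.conv", ["encoder.*"])
assert should_ignore("quantizer.vq.layers.3._codebook.embed", ["quantizer.*.embed"])
assert not should_ignore("decoder.model.1.lstm", ["encoder.*"])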
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
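    # Example invocation (paths illustrative):
    #   python convert_encodec_checkpoint_to_pytorch.py \
    #       --model encodec_24khz \
    #       --checkpoint_path encodec_24khz-d7cc33bc.th \
    #       --pytorch_dump_folder_path ./encodec_24khz_hf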
| 606 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606 | 1 |
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube."""
    # Round the float cube root before re-cubing: 27 ** (1 / 3) is
    # 3.0000000000000004, so an exact float comparison would wrongly fail.
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
    print(perfect_cube(27))
print(perfect_cube(4))
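    # Cross-check sketch (illustrative): agrees with the brute-force definition.
    assert all(perfect_cube(i * i * i) for i in range(1, 100))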
| 553 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (latitude, longitude) points."""
    # Reduced latitudes correct for the Earth's flattening (WGS84 ellipsoid)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
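# Example (coordinates approximate, output not asserted):
# SAN_FRANCISCO = (37.774856, -122.424227)
# NEW_YORK = (40.713019, -74.012647)
# haversine_distance(*SAN_FRANCISCO, *NEW_YORK)  # roughly 4.13e6 metres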
| 553 | 1 |
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 254 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
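# Deterministic sanity check for the two functions above; any correct in-place
# quicksort must produce this result regardless of the random pivots chosen.
demo = [3, 1, 2, 5, 4]
_in_place_quick_sort(demo, 0, len(demo) - 1)
assert demo == [1, 2, 3, 4, 5]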
| 96 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
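# The `_LazyModule` indirection above keeps package import cheap: heavy submodules
# are only imported when one of their attributes is first accessed. A self-contained
# sketch of the same idea (hypothetical class, independent of the actual
# transformers implementation):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the owning submodule on first access, then cache the attribute.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value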
| 413 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
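# With the names restored above, the past_key_values placeholders come out as one
# (key, value) pair of zero tensors per layer, each shaped
# (batch, n_head, seq_length + 2, n_embd // n_head). A small smoke test (the tiny
# config values are arbitrary choices for illustration):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = CodeGenOnnxConfig(config, use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    print(dummy["past_key_values"][0][0].shape)  # torch.Size([2, 4, 10, 16])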
| 413 | 1 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
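# The in_proj splitting above is the heart of the conversion: fairseq-style attention
# stores q/k/v as one fused (3 * embed_dim, embed_dim) matrix. A standalone sketch of
# that slicing (hypothetical helper, shown only for illustration):
def split_in_proj(in_proj_weight):
    import torch

    embed_dim = in_proj_weight.shape[0] // 3
    q_w = in_proj_weight[:embed_dim, :]
    k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v_w = in_proj_weight[2 * embed_dim :, :]
    assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)
    return q_w, k_w, v_w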
| 581 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
__a : List[Any] = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_lowercase ) , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_lowercase ) , x.transpose() ) )
__a : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(3 , 4 )
__a : Optional[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
__a : Optional[Any] = np.random.randn(3 , 4 , 5 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = np.random.randn(3 , 4 )
__a : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
__a : Any = np.random.randn(3 , 4 , 5 )
__a : List[str] = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = np.random.randn(3 , 4 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , np.asarray(transpose(_lowercase ) ) ) )
__a : Optional[int] = np.random.randn(3 , 4 , 5 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(_lowercase , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.reshape(_lowercase , (4, 3) ) ) )
__a : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , np.reshape(_lowercase , (12, 5) ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = np.random.randn(3 , 4 )
__a : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
__a : List[str] = np.random.randn(3 , 4 , 5 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , reshape(_lowercase , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = np.random.randn(3 , 4 )
__a : Dict = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
__a : Tuple = np.random.randn(3 , 4 , 5 )
__a : Optional[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , reshape(_lowercase , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = np.random.randn(3 , 4 )
__a : Tuple = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.asarray(reshape(_lowercase , (4, 3) ) ) ) )
__a : Tuple = np.random.randn(3 , 4 , 5 )
__a : Optional[int] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , np.asarray(reshape(_lowercase , (12, 5) ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.squeeze(_lowercase ) ) )
__a : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.squeeze(_lowercase , axis=2 ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = np.random.randn(1 , 3 , 4 )
__a : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
__a : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
__a : str = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(1 , 3 , 4 )
__a : Tuple = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
__a : Any = np.random.randn(1 , 4 , 1 , 5 )
__a : List[str] = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(1 , 3 , 4 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.asarray(squeeze(_lowercase ) ) ) )
__a : Any = np.random.randn(1 , 4 , 1 , 5 )
__a : Optional[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.asarray(squeeze(_lowercase , axis=2 ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.expand_dims(_lowercase , axis=1 ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = np.random.randn(3 , 4 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(3 , 4 )
__a : str = tf.constant(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(3 , 4 )
__a : Optional[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.asarray(expand_dims(_lowercase , axis=1 ) ) ) )
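# The framework-dispatch utilities under test can be exercised directly; they accept
# numpy arrays and framework tensors alike. A numpy-only check (no GPU frameworks
# required), mirroring the assertions above:
if __name__ == "__main__":
    x = np.random.randn(3, 4, 5)
    assert np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))
    assert reshape(x, (12, 5)).shape == (12, 5)
    assert flatten_dict({"a": {"b": 1}}) == {"a.b": 1}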
| 581 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
_a : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_a : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
_a : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
_a : str = StableDiffusionInpaintPipeline.from_pretrained(__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_a : Any = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_a : int = torch.manual_seed(0 )
_a : List[str] = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , output_type='''np''' , )
_a : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
_a : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_a : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_a : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
_a : List[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
_a : int = StableDiffusionInpaintPipeline.from_pretrained(
__snake_case , torch_dtype=torch.floataa , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_a : int = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_a : Optional[int] = torch.manual_seed(0 )
_a : Optional[Any] = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , output_type='''np''' , )
_a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_a : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_a : Optional[int] = '''stabilityai/stable-diffusion-2-inpainting'''
_a : List[str] = PNDMScheduler.from_pretrained(__snake_case , subfolder='''scheduler''' )
_a : str = StableDiffusionInpaintPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , scheduler=__snake_case , torch_dtype=torch.floataa , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_a : str = torch.manual_seed(0 )
_a : Any = pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='''np''' , )
_a : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 249 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
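# Quick equivalence check: `custom_unfold` exists so the graph can be exported to
# ONNX, but on eager tensors it should match `torch.Tensor.unfold` exactly.
if __name__ == "__main__":
    import torch

    t = torch.arange(10).view(1, 10)
    assert torch.equal(custom_unfold(t, 1, 4, 2), t.unfold(1, 4, 2))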
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
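# How `attention_types` expands: [[["global", "local"], 12]] means "repeat the
# (global, local) pair 12 times", yielding the 24 per-layer attention kinds that the
# length check in `__init__` validates against `num_layers`.
if __name__ == "__main__":
    layers = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    assert len(layers) == 24
    assert layers[:4] == ["global", "local", "global", "local"]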
| 249 | 1 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Prints a maximum set of activities a single person can do, one at a time,
    assuming the activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
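# The greedy above assumes the activity lists are already sorted by finish time; if
# they are not, sort the (start, finish) pairs first. A small wrapper sketch (the
# helper name is ours, not part of the original):
def print_max_activities_unsorted(start: list[int], finish: list[int]) -> None:
    pairs = sorted(zip(start, finish), key=lambda pair: pair[1])  # sort by finish time
    sorted_start = [pair[0] for pair in pairs]
    sorted_finish = [pair[1] for pair in pairs]
    print_max_activities(sorted_start, sorted_finish)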
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
__magic_name__ : Union[str, Any] = self.block_out_channels
__magic_name__ : Tuple = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__magic_name__ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
__magic_name__ : Dict = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__magic_name__ : List[str] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__magic_name__ : Dict = FlaxTimestepEmbedding(__UpperCamelCase , dtype=self.dtype )
__magic_name__ : Tuple = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__magic_name__ : Union[str, Any] = self.only_cross_attention
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ : Tuple = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ : Any = (num_attention_heads,) * len(self.down_block_types )
# down
__magic_name__ : Optional[int] = []
__magic_name__ : Union[str, Any] = []
__magic_name__ : Optional[int] = block_out_channels[0]
__magic_name__ : Tuple = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
__magic_name__ : Optional[Any] = output_channel
__magic_name__ : List[Any] = block_out_channels[i]
__magic_name__ : int = i == len(__UpperCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__magic_name__ : List[str] = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__magic_name__ : Optional[int] = FlaxDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCamelCase )
for _ in range(self.layers_per_block ):
__magic_name__ : Any = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
if not is_final_block:
__magic_name__ : Optional[int] = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__UpperCamelCase )
__magic_name__ : str = down_blocks
__magic_name__ : List[str] = controlnet_down_blocks
# mid
__magic_name__ : Optional[Any] = block_out_channels[-1]
__magic_name__ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__UpperCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__magic_name__ : List[Any] = nn.Conv(
__UpperCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self: Optional[int] , __UpperCamelCase: Any , __UpperCamelCase: Dict , __UpperCamelCase: Optional[Any] , __UpperCamelCase: List[str] , __UpperCamelCase: float = 1.0 , __UpperCamelCase: bool = True , __UpperCamelCase: bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
__magic_name__ : List[str] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__magic_name__ : Dict = jnp.flip(__UpperCamelCase , axis=1 )
# 1. time
if not isinstance(__UpperCamelCase , jnp.ndarray ):
__magic_name__ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__magic_name__ : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
__magic_name__ : Optional[Any] = jnp.expand_dims(__UpperCamelCase , 0 )
__magic_name__ : Optional[int] = self.time_proj(__UpperCamelCase )
__magic_name__ : Any = self.time_embedding(__UpperCamelCase )
# 2. pre-process
__magic_name__ : Optional[Any] = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
__magic_name__ : List[Any] = self.conv_in(__UpperCamelCase )
__magic_name__ : List[Any] = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
__magic_name__ : Dict = self.controlnet_cond_embedding(__UpperCamelCase )
sample += controlnet_cond
# 3. down
__magic_name__ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
__magic_name__ , __magic_name__ : Dict = down_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
else:
__magic_name__ , __magic_name__ : Union[str, Any] = down_block(__UpperCamelCase , __UpperCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__magic_name__ : Optional[int] = self.mid_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
# 5. contronet blocks
__magic_name__ : Optional[Any] = ()
for down_block_res_sample, controlnet_block in zip(__UpperCamelCase , self.controlnet_down_blocks ):
__magic_name__ : Any = controlnet_block(__UpperCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
__magic_name__ : int = controlnet_down_block_res_samples
__magic_name__ : List[str] = self.controlnet_mid_block(__UpperCamelCase )
# 6. scaling
__magic_name__ : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
            down_block_res_samples=__UpperCamelCase , mid_block_res_sample=__UpperCamelCase )
 | 436 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
def solution(limit: int = 50_000_000) -> int:
    """Count numbers below `limit` expressible as the sum of a prime square, a prime
    cube, and a prime fourth power (Project Euler 87)."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
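    # Known small case from the Project Euler 87 statement: below fifty there are
    # exactly four expressible numbers (28, 33, 47 and 49), so solution(50) must be 4.
    assert solution(50) == 4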
| 462 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 468 |
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# Name assumed: the original identifier of this last constant could not be recovered.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 468 | 1 |
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py

REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
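    # Example invocation (illustrative paths; the opset table is expected at
    # utils/tf_ops/onnx.json relative to REPO_PATH, as hardcoded above):
    #
    #   python check_tf_ops.py --saved_model_path ./saved_model.pb --opset 12 --strict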
| 709 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = "▁"
_lowercase = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_lowercase = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
_lowercase = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
_lowercase = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
_lowercase = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = ["input_ids"]
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = RESOURCE_FILES_NAMES
def __init__( self , a__ , a__=None , a__=False , a__="utf8" , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__ = None , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , vocab_file=a__ , encoding=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
A__ = do_lower_case
A__ = sentencepiece_model_ckpt
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a__)
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A__ = self.load_vocab(filepath=a__)
else:
A__ = {self.sp_model.id_to_piece(a__): id for id in range(self.sp_model.get_piece_size())}
A__ = {v: k for k, v in self.vocab.items()}
def snake_case_ ( self , a__):
if text is None:
return None
A__ = self.tokenize(a__)
A__ , A__ = '''''', []
for i, ch in enumerate(a__):
if ch in self.SP_CHAR_MAPPING:
A__ = self.SP_CHAR_MAPPING.get(a__)
else:
A__ = unicodedata.normalize('''NFKC''' , a__)
if self.is_whitespace(a__):
continue
normalized_text += ch
char_mapping.extend([i] * len(a__))
A__ , A__ , A__ = normalized_text, [], 0
if self.do_lower_case:
A__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
A__ = token[1:]
A__ = text[offset:].index(a__) + offset
A__ = start + len(a__)
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
A__ = end
return token_mapping
@property
def snake_case_ ( self):
return len(self.vocab)
def snake_case_ ( self):
return dict(self.vocab , **self.added_tokens_encoder)
def __getstate__( self):
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self , a__):
A__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.sentencepiece_model_ckpt)
def snake_case_ ( self , a__):
return "".join((self.SP_CHAR_MAPPING.get(a__ , a__) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=6_4, alpha=0.1):
        """Tokenize a string, optionally with SentencePiece subword-regularization sampling."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
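For orientation, the pair layout produced by build_inputs_with_special_tokens above is [CLS] A [SEP] [SEP] B [SEP] (two separators between segments, unlike BERT). A minimal self-contained sketch with made-up ids; the real ids come from the SentencePiece vocab of an actual checkpoint:

cls_id, sep_id = 0, 2  # illustrative ids, not taken from a real ernie-m checkpoint
ids_a, ids_b = [11, 12, 13], [21, 22]
pair = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
assert len(pair) == len(token_type_ids) == len(ids_a) + len(ids_b) + 4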
| 526 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer: NMT/NFKC/lower-casing normalization with the pre-tokenization used by SentencePiece."""
def __init__( self, lowerCamelCase__ = "▁", lowerCamelCase__ = True, lowerCamelCase__ = "<unk>", lowerCamelCase__ = "</s>", lowerCamelCase__ = "<pad>", ):
A : Any = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
A : Tuple = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
A : List[Any] = token_dict['token']
A : List[Any] = Tokenizer(Unigram() )
A : str = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ), """ """ ),
normalizers.Lowercase(),
] )
A : Optional[int] = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCamelCase__, add_prefix_space=lowerCamelCase__ ),
pre_tokenizers.Digits(individual_digits=lowerCamelCase__ ),
pre_tokenizers.Punctuation(),
] )
A : Optional[Any] = decoders.Metaspace(replacement=lowerCamelCase__, add_prefix_space=lowerCamelCase__ )
A : Optional[Any] = TemplateProcessing(
single=f'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])], )
A : Optional[int] = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(lowerCamelCase__, lowerCamelCase__ )
    def train( self, files, vocab_size = 8000, show_progress = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str ):
            files = [files]
        self._tokenizer.train(files, trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self, iterator, vocab_size = 8000, show_progress = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ):
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
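A minimal training sketch for the class above. The toy corpus and vocab size are illustrative; lower vocab_size if the Unigram trainer cannot reach it on such a small corpus:

if __name__ == "__main__":
    corpus = ["hello world", "hello unigram tokenizer", "a small world of tokenizers"]
    spm_tokenizer = SentencePieceUnigramTokenizer()
    spm_tokenizer.train_from_iterator(corpus, vocab_size=20, show_progress=False)
    print(spm_tokenizer.encode("hello world").tokens)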
| 662 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase :
'''simple docstring'''
def __init__( self : List[Any] , snake_case : Optional[Any] , snake_case : int=13 , snake_case : int=7 , snake_case : Dict=True , snake_case : Union[str, Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=True , snake_case : Tuple=99 , snake_case : Any=[1, 1, 2] , snake_case : Dict=1 , snake_case : Optional[int]=32 , snake_case : Union[str, Any]=4 , snake_case : Optional[Any]=8 , snake_case : Dict=37 , snake_case : int="gelu_new" , snake_case : Optional[Any]=0.1 , snake_case : List[str]=0.1 , snake_case : Any=0.0 , snake_case : Dict=512 , snake_case : List[str]=3 , snake_case : Any=0.02 , snake_case : List[str]=3 , snake_case : Optional[Any]=4 , snake_case : Dict=None , snake_case : Any=False , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Dict = block_sizes
SCREAMING_SNAKE_CASE : Dict = num_decoder_layers
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE : Optional[int] = d_head
SCREAMING_SNAKE_CASE : int = d_inner
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout
SCREAMING_SNAKE_CASE : Any = attention_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : int = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE : int = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE : List[str] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE : Dict = self.num_hidden_layers + 2
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : Dict , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase_ ( self : str , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Dict , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase_ ( self : Optional[int] , snake_case : Any , snake_case : str , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFFunnelForPreTraining(config=snake_case )
SCREAMING_SNAKE_CASE : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Tuple , snake_case : int , snake_case : str , snake_case : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple , snake_case : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFFunnelForSequenceClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForMultipleChoice(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Dict , snake_case : Any , snake_case : int , snake_case : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForTokenClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFFunnelForQuestionAnswering(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Any = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
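The hidden-state bookkeeping used by the tester reduces to simple arithmetic; a standalone sketch (not part of the test suite) for the default configuration:

block_sizes, num_decoder_layers = [1, 1, 2], 1
base_hidden_layers = sum(block_sizes)                         # TFFunnelBaseModel: 4 encoder layers
full_hidden_layers = base_hidden_layers + num_decoder_layers  # full TFFunnelModel: 5
expected_num_hidden_states = full_hidden_layers + 2           # + input embeddings + upsampled encoder state: 7
print(base_hidden_layers, full_hidden_layers, expected_num_hidden_states)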
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Tuple = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=snake_case )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : Tuple = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelModelTester(self , base=snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=snake_case )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*snake_case )
 | 213 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
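For reference, the two-stage flow these exports implement looks roughly like the sketch below; it belongs in user code rather than this __init__, and the hub checkpoint names are assumptions based on the kandinsky-community organization:

import torch
from diffusers import KandinskyPipeline, KandinskyPriorPipeline

prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
decoder = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda")
image_embeds, negative_image_embeds = prior("a photo of a red panda").to_tuple()
image = decoder(
    "a photo of a red panda",
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    num_inference_steps=50,
).images[0]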
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Dict = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__magic_name__: Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
] , )
__magic_name__: List[str] = text_generator.model.config.eos_token_id
__magic_name__: Dict = """<pad>"""
__magic_name__: Dict = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__magic_name__: Optional[int] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Tuple ) -> Any:
__magic_name__: int = TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case )
return text_generator, ["This is a test", "Another test"]
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: Tuple = """Hello I believe in"""
__magic_name__: List[str] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
__magic_name__: List[Any] = text_generator(__snake_case )
self.assertEqual(
__snake_case , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
__magic_name__: List[str] = text_generator(__snake_case , stop_sequence=""" fe""" )
self.assertEqual(__snake_case , [{"""generated_text""": """Hello I believe in fe"""}] )
def lowerCamelCase__ ( self : Any , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = text_generator.model
__magic_name__: Union[str, Any] = text_generator.tokenizer
__magic_name__: Union[str, Any] = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: str = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = pipeline(task="""text-generation""" , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case )
__magic_name__: Tuple = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: List[str] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__magic_name__: Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
__magic_name__: Any = text_generator("""test""" , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: List[str] = text_generator("""test""" , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: Tuple = text_generator("""test""" , return_text=__snake_case , return_tensors=__snake_case )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__magic_name__: int = text_generator("""""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__magic_name__: Any = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__magic_name__: Union[str, Any] = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
__magic_name__: List[str] = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__snake_case ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
__magic_name__: Optional[int] = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__magic_name__: Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__: Optional[Any] = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__magic_name__: int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__magic_name__: Any = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> Any:
import torch
__magic_name__: List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : Dict ) -> Any:
import torch
__magic_name__: List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__snake_case , top_p=0.5 )
    def test_pipeline_length_setting_warning( self ) -> Any:
        prompt = "Hello world"
        text_generator = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils" )
        else:
            logger = logging.get_logger("transformers.generation.utils" )
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 )
        self.assertNotIn(logger_msg , cl.out )
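The behaviours exercised above boil down to a few lines of user-facing API; a minimal sketch against the same tiny test checkpoint:

from transformers import pipeline

generator = pipeline("text-generation", model="sshleifer/tiny-ctrl")
print(generator("This is a test", do_sample=False, max_new_tokens=5))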
| 213 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters (<= max_perimeter) of almost-equilateral triangles with integral sides and area."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 214 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
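A hedged usage sketch for the generated module; the import name and the model file path below are placeholders for illustration:

import sentencepiece_model_pb2 as model_pb2  # assumes this file is importable under that name

proto = model_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # placeholder path to a SentencePiece model file
    proto.ParseFromString(f.read())
print(len(proto.pieces), proto.trainer_spec.vocab_size)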
| 214 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level."""
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise ValueError("Something went wrong: the input queue emptied unexpectedly" )
def pre_order(node: TreeNode) -> None:
    """Root -> left subtree -> right subtree."""
    if not isinstance(node, TreeNode ) or not node:
        return
    print(node.data, end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode) -> None:
    """Left subtree -> root -> right subtree."""
    if not isinstance(node, TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data, end=',' )
    in_order(node.right )
def post_order(node: TreeNode) -> None:
    """Left subtree -> right subtree -> root."""
    if not isinstance(node, TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data, end=',' )
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal, printed on one line."""
    if not isinstance(node, TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal, printing each level on its own line."""
    if not isinstance(node, TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data, end=',' )
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode ) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=',' )
def A__ ( __lowerCamelCase = "", __lowerCamelCase=5_0, __lowerCamelCase="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase = divmod(width - len(__lowerCamelCase ) - 2, 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 309 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
a__ : List[str] = """docs/source/en/_toctree.yml"""
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(__lowerCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__lowerCamelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__lowerCamelCase, key=lambda __lowerCamelCase : s["title"].lower() )
def check_model_doc(overwrite=False):
    """Check (and optionally fix) the Models section of the doc table of content."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
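Programmatically the same check is a single call; a sketch assuming the docs/source/en/_toctree.yml layout the script expects:

check_model_doc(overwrite=False)  # raises ValueError if the Models toc has duplicates or is unsorted
check_model_doc(overwrite=True)   # rewrites docs/source/en/_toctree.yml in place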
| 309 | 1 |
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """Count the numbers below `limit` expressible as p^2 + q^3 + r^4 with p, q, r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 394 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
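A migration sketch for downstream code; the checkpoint name is the usual Donut base model on the hub, treated here as an assumption:

from transformers import DonutImageProcessor

image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
# DonutFeatureExtractor.from_pretrained(...) still works, but emits the FutureWarning above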
| 391 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        """Return a deep copy of this configuration."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()} )
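Typical usage of the dataclass and its copy() helper, assuming the field names restored above:

base_config = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)
per_call = base_config.copy()        # deep copy: mutating it leaves base_config untouched
per_call.force_download = True
assert base_config.force_download is False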
| 32 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            self.rem = last % 1  # fractional part, used to round `last` to the nearest integer
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])
    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
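What stretch() implements is plain histogram equalization: with $L = 256$ gray levels and empirical level probabilities $p(j)$, each input level $k$ is remapped to

$$s_k = \operatorname{round}\!\Big((L - 1)\sum_{j=0}^{k} p(j)\Big),$$

i.e. the scaled, rounded cumulative distribution, which spreads heavily used levels apart and flattens the output histogram.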
| 32 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCAmelCase_ : List[str] = "facebook/wmt19-en-de"
UpperCAmelCase_ : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
UpperCAmelCase_ : str = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
UpperCAmelCase_ : Any = tokenizer(["Making tiny model"], return_tensors="pt")
UpperCAmelCase_ : Optional[Any] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
UpperCAmelCase_ : Tuple = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
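A quick reload check for the artifact produced above (local directory path as written by the script):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
print(model.num_parameters())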
| 491 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = "generated"
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **preprocess_params ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **preprocess_params )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"] ).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length" , self.model.config.min_length )
        generate_kwargs["max_length"] = generate_kwargs.get("max_length" , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=ReturnType.TEXT , UpperCAmelCase__=False ):
A__ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A__ = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
A__ = {
F"""{self.return_name}_text""": self.tokenizer.decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , )
}
records.append(UpperCAmelCase__ )
return records
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : List[str] = """summary"""
def __call__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = """translation"""
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __A ( self , *UpperCAmelCase__ , UpperCAmelCase__=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__=None , UpperCAmelCase__=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ )
else:
return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__=None , UpperCAmelCase__=None , **UpperCAmelCase__ ):
A__ , A__ , A__ = super()._sanitize_parameters(**UpperCAmelCase__ )
if src_lang is not None:
A__ = src_lang
if tgt_lang is not None:
A__ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A__ = kwargs.get("task" , self.task )
A__ = task.split("_" )
if task and len(UpperCAmelCase__ ) == 4:
# translation, XX, to YY
A__ = items[1]
A__ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
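
# Usage sketch (added for illustration, not part of the original module): these
# classes are normally reached through the `transformers.pipeline` factory. The
# checkpoint names below are public Hub models chosen only as examples.
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   print(summarizer("A very long article ...", max_length=60, min_length=10))
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How old are you?"))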
| 491 | 1 |
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) answering range-maximum queries with
    point updates in O(log^2 n).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and repair every tree node covering index."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only `index` itself.
                self.tree[index] = value
            else:
                # Recompute the maximum over the block [current_left_border, index].
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
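
# Usage sketch: point updates followed by range-maximum queries over the
# half-open interval [left, right).
#
#   tree = MaxFenwickTree(10)
#   tree.update(2, 7)
#   tree.update(5, 3)
#   tree.query(0, 6)   # -> 7
#   tree.query(3, 6)   # -> 3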
| 417 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 417 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
| 687 |
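# Usage sketch: instantiating the configuration defined above with one
# override; unspecified fields keep the defaults from the signature.
#
#   config = VivitConfig(num_frames=16)
#   assert config.hidden_size == 768 and config.num_frames == 16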
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 687 | 1 |
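# Quick reference for the helper under test (signature taken from the calls
# above): truncate_or_pad(sequence, block_size, pad_token_id) clips a sequence
# longer than block_size and right-pads a shorter one, e.g.
#   truncate_or_pad([1, 2, 3], 5, 0)  ->  [1, 2, 3, 0, 0]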
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
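
# Usage note (illustrative): with the `_LazyModule` indirection above,
# importing the package is cheap; the torch-backed classes are only
# materialized on first attribute access, e.g.
#   from transformers.models import timesformer   # fast, no torch import yet
#   cls = timesformer.TimesformerModel             # triggers the real import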
| 233 |
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k when there are fewer targets than requested predictions
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
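
# Usage sketch: the pipeline above is normally constructed through the factory;
# `targets` restricts scoring to candidate tokens, `top_k` caps the predictions.
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   print(unmasker("The capital of France is <mask>.", top_k=3))
#   print(unmasker("The capital of France is <mask>.", targets=["Paris", "London"]))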
| 233 | 1 |
"""simple docstring"""
class __UpperCamelCase :
def __init__( self ,_A = "" ,_A = False ):
'''simple docstring'''
_lowerCAmelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
_lowerCAmelCase : Tuple = is_leaf
_lowerCAmelCase : Optional[Any] = prefix
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = 0
for q, w in zip(self.prefix ,_A ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
for word in words:
self.insert(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.prefix == word:
_lowerCAmelCase : Any = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_lowerCAmelCase : Dict = RadixNode(prefix=_A ,is_leaf=_A )
else:
_lowerCAmelCase : Union[str, Any] = self.nodes[word[0]]
_lowerCAmelCase : Union[str, Any] = incoming_node.match(
_A )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_A )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_lowerCAmelCase : Optional[Any] = remaining_prefix
_lowerCAmelCase : Dict = self.nodes[matching_string[0]]
_lowerCAmelCase : Optional[Any] = RadixNode(_A ,_A )
_lowerCAmelCase : List[Any] = aux_node
if remaining_word == "":
_lowerCAmelCase : Tuple = True
else:
self.nodes[matching_string[0]].insert(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.nodes.get(word[0] ,_A )
if not incoming_node:
return False
else:
_lowerCAmelCase : Optional[int] = incoming_node.match(
_A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.nodes.get(word[0] ,_A )
if not incoming_node:
return False
else:
_lowerCAmelCase : Dict = incoming_node.match(
_A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_A )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_lowerCAmelCase : Optional[int] = list(self.nodes.values() )[0]
_lowerCAmelCase : Optional[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
_lowerCAmelCase : str = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_lowerCAmelCase : List[Any] = False
# If there is 1 edge, we merge it with its child
else:
_lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0]
_lowerCAmelCase : List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_lowerCAmelCase : List[str] = merging_node.nodes
return True
def __lowerCamelCase ( self ,_A = 0 ):
'''simple docstring'''
if self.prefix != "":
print('-' * height ,self.prefix ,' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = "banana bananas bandana band apple all beast".split()
_lowerCAmelCase : str = RadixNode()
root.insert_many(_lowerCamelCase )
assert all(root.find(_lowerCamelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def lowerCamelCase__ ( ):
'''simple docstring'''
assert test_trie()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = RadixNode()
_lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(_lowerCamelCase )
print('Words:' , _lowerCamelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 259 |
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Number of moves needed so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
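
# Usage sketch: a root holding 3 coins and two empty children needs one move
# per child, so two moves in total.
#
#   root = TreeNode(3, TreeNode(0), TreeNode(0))
#   assert distribute_coins(root) == 2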
| 679 | 0 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 715 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 338 | 0 |
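# Usage note: this example script is meant to be launched through Accelerate's
# CLI rather than plain `python`, e.g. (the script filename is illustrative):
#   accelerate config                                  # one-time hardware setup
#   accelerate launch multi_process_metrics.py --mixed_precision fp16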
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__lowerCamelCase = namedtuple('covid_data', 'cases deaths recovered')
def a ( __UpperCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
__magic_name__: Dict = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(__UpperCAmelCase ).content ).xpath(__UpperCAmelCase ) )
__lowerCamelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 96 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def A__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
    _lowercase =UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase , )
_lowercase =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
_lowercase =DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_zero=lowerCAmelCase , )
torch.manual_seed(0 )
_lowercase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowercase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_lowercase =CLIPTextModel(lowerCAmelCase )
_lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowercase ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Tuple:
'''simple docstring'''
_lowercase =floats_tensor((1, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
    _lowercase =Image.fromarray(np.uint8(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
    _lowercase =Image.fromarray(np.uint8(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def A__ ( self ) -> Any:
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
_lowercase =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe_loaded(**lowerCAmelCase )[0]
_lowercase =np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase , 1e-4 )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_mask_inputs(lowerCAmelCase )
_lowercase =pipe.generate_mask(**lowerCAmelCase )
_lowercase =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_lowercase =np.array([0] * 9 )
_lowercase =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase ={'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
_lowercase =DPMSolverMultistepScheduler(**lowerCAmelCase )
_lowercase =DPMSolverMultistepInverseScheduler(**lowerCAmelCase )
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A__ ( cls ) -> int:
'''simple docstring'''
_lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_lowercase =raw_image.convert('RGB' ).resize((768, 768) )
_lowercase =raw_image
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
        'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
_lowercase =DDIMScheduler.from_config(pipe.scheduler.config )
_lowercase =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
        'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
_lowercase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowercase =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase , num_inference_steps=25 , ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 291 | 0 |
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex carrying Prim bookkeeping (key, predecessor, edges)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (a and b are 1-based indices into the graph list):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear-scan minimum; returns MST edges."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based variant of Prim's algorithm; yields MST edges."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """
    >>> x = Vertex(0)
    >>> repr(x)
    '0'
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
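
# Usage sketch: a triangle graph with weights 5, 4 and 10; both variants pick
# the two light edges (vertex ids are 0-based, `connect` indices are 1-based).
#
#   graph = [Vertex(i) for i in range(3)]
#   connect(graph, 1, 2, 5)
#   connect(graph, 2, 3, 4)
#   connect(graph, 1, 3, 10)
#   print(prim(graph, graph[0]))               # [(2, 1), (3, 2)]
#   print(list(prim_heap(graph, graph[0])))    # same edges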
| 716 |
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
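
# Usage note: run from a shell with the recursion depth as the only argument,
# e.g. `python fractals.py 4`; each extra level triples the number of drawn
# triangles.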
| 501 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCAmelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    prefix_ones = [1] * len(self.prefix_tokens)
    suffix_ones = [1] * len(self.suffix_tokens)
    if token_ids_1 is None:
        return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
    return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tgt_lang_id
return inputs
def UpperCAmelCase ( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = """""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en_XX" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro_RO" , **_SCREAMING_SNAKE_CASE , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.lang_code_to_id[src_lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.lang_code_to_id[lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
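# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (with hypothetical toy vocab values) of the
# fairseq/SentencePiece id alignment used by the tokenizer above: reserved
# tokens resolve through the fairseq table, everything else is shifted by the
# fairseq offset, and spm id 0 falls back to the unknown-token id.
def fairseq_token_to_id_sketch(token: str) -> int:
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1  # spm position 3 (",") sits at fairseq position 4
    toy_spm_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = toy_spm_vocab.get(token, 0)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert fairseq_token_to_id_sketch(",") == 4
assert fairseq_token_to_id_sketch("<pad>") == 1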
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
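# Quick sanity check (a sketch added for reference, not part of the original
# test): an MST's weight is the sum of its edge weights, and the expected
# answer above totals 37 for this classic 9-node example.
def mst_weight(edges: list) -> int:
    return sum(weight for _node1, _node2, weight in edges)


assert mst_weight([[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]) == 37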
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'lilt'
def __init__( self : Tuple , a : Optional[int]=30_522 , a : int=768 , a : List[Any]=12 , a : List[Any]=12 , a : Any=3_072 , a : Union[str, Any]="gelu" , a : Union[str, Any]=0.1 , a : Tuple=0.1 , a : Any=512 , a : int=2 , a : Optional[Any]=0.0_2 , a : str=1E-12 , a : str=0 , a : int="absolute" , a : List[str]=None , a : int=4 , a : List[Any]=1_024 , **a : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=a , **a )
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : Dict = type_vocab_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : List[Any] = position_embedding_type
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : Dict = channel_shrink_ratio
lowerCAmelCase__ : Union[str, Any] = max_ad_position_embeddings
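# Usage sketch (hedged, kept commented out since it needs transformers
# installed): in the public transformers API this configuration corresponds to
# `LiltConfig`, whose LiLT-specific knobs are the layout-stream ones
# (`channel_shrink_ratio`, `max_2d_position_embeddings`).
# from transformers import LiltConfig, LiltModel
# configuration = LiltConfig()        # defaults match SCUT-DLVCLab/lilt-roberta-en-base
# model = LiltModel(configuration)    # randomly initialized model with that shape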
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = ConsistencyModelPipeline
lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ):
'''simple docstring'''
if class_cond:
lowerCAmelCase__ : Tuple = self.dummy_cond_unet
else:
lowerCAmelCase__ : Dict = self.dummy_uncond_unet
# Default to CM multistep sampler
lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : List[str] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : str = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_dummy_inputs(a )
lowerCAmelCase__ : str = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a )
lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a )
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a )
lowerCAmelCase__ : Optional[Any] = 1
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : List[Any] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a )
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : str = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
lowerCAmelCase__ : List[Any] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
lowerCAmelCase__ : Tuple = latents
return inputs
def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
'''simple docstring'''
if type(a ) == str:
lowerCAmelCase__ : str = torch.device(a )
lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[Any] = self.get_inputs()
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_inputs()
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
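# The assertion pattern used by every test above, as a standalone sketch:
# compare a 3x3 corner slice of the generated batch against pinned values.
import numpy as np


def assert_image_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-3) -> None:
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol


assert_image_slice(np.full((1, 32, 32, 3), 0.5), np.full(9, 0.5))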
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__lowercase= Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
return image
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
'''simple docstring'''
__lowercase= dct.pop(__SCREAMING_SNAKE_CASE )
__lowercase= val
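# A standalone sketch of the helper above: renaming a state-dict entry is just
# popping the old key and re-inserting its tensor under the new one.
_demo_sd = {"visual_encoder.cls_token": 0}
_demo_sd["vision_model.embeddings.class_embedding"] = _demo_sd.pop("visual_encoder.cls_token")
assert "visual_encoder.cls_token" not in _demo_sd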
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__lowercase= state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
__lowercase= state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
__lowercase= torch.cat((q_bias, torch.zeros_like(__SCREAMING_SNAKE_CASE , requires_grad=__SCREAMING_SNAKE_CASE ), v_bias) )
__lowercase= qkv_bias
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= 3_6_4 if 'coco' in model_name else 2_2_4
__lowercase= BlipaVisionConfig(image_size=__SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__lowercase= OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "opt-6.7b" in model_name:
__lowercase= OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "t5-xl" in model_name:
__lowercase= TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__lowercase= TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__lowercase= BlipaConfig(vision_config=__SCREAMING_SNAKE_CASE , text_config=__SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None , lowercase__=False ) -> str:
'''simple docstring'''
__lowercase= (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__lowercase= tokenizer('\n' , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids[0]
__lowercase, __lowercase= get_blipa_config(__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
__lowercase= BlipaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
__lowercase= {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__lowercase, __lowercase= model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__lowercase= 'cuda' if torch.cuda.is_available() else 'cpu'
__lowercase, __lowercase, __lowercase= load_model_and_preprocess(
name=__SCREAMING_SNAKE_CASE , model_type=__SCREAMING_SNAKE_CASE , is_eval=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
original_model.eval()
print('Done!' )
# update state dict keys
__lowercase= original_model.state_dict()
__lowercase= create_rename_keys(__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__lowercase= state_dict.pop(__SCREAMING_SNAKE_CASE )
if key.startswith('Qformer.bert' ):
__lowercase= key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__lowercase= key.replace('self' , 'attention' )
if "opt_proj" in key:
__lowercase= key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__lowercase= key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__lowercase= key.replace('opt' , 'language' )
if key.startswith('t5' ):
__lowercase= key.replace('t5' , 'language' )
__lowercase= val
# read in qv biases
read_in_q_v_bias(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowercase, __lowercase= hf_model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__lowercase= load_demo_image()
__lowercase= vis_processors['eval'](__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
__lowercase= tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
# create processor
__lowercase= BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE )
__lowercase= BlipaProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
__lowercase= processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# make sure processor creates exact same pixel values
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
hf_model.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
__lowercase= original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__lowercase= hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits
else:
__lowercase= original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__lowercase= input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
__lowercase= hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__lowercase= torch.tensor(
[[-4_1.5_8_5_0, -4.4440, -8.9922], [-4_7.4_3_2_2, -5.9143, -1.7340]] , device=__SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__lowercase= torch.tensor(
[[-5_7.0_1_0_9, -9.8967, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__SCREAMING_SNAKE_CASE )
else:
# cast to same type
__lowercase= logits.dtype
assert torch.allclose(original_logits.to(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__lowercase= ''
__lowercase= tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
__lowercase= original_model.generate({'image': original_pixel_values} )
__lowercase= hf_model.generate(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __SCREAMING_SNAKE_CASE )
__lowercase= input_ids.shape[1]
__lowercase= processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
__lowercase= [text.strip() for text in output_text]
print('HF generation:' , __SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
lowerCAmelCase = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
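# The q/k/v bias assembly from `read_in_q_v_bias` above, in isolation: the HF
# port stores the three attention biases as one vector, with the key slice
# pinned to zero. A sketch with toy tensors (requires torch).
import torch

_q_bias = torch.ones(4)
_v_bias = torch.full((4,), 2.0)
_qkv_bias = torch.cat((_q_bias, torch.zeros_like(_q_bias), _v_bias))
assert _qkv_bias.shape == (12,) and _qkv_bias[4:8].sum() == 0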
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=99 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Optional[Any]=None , ) -> str:
_UpperCamelCase =parent
_UpperCamelCase =batch_size
_UpperCamelCase =seq_length
_UpperCamelCase =is_training
_UpperCamelCase =use_input_mask
_UpperCamelCase =use_token_type_ids
_UpperCamelCase =use_labels
_UpperCamelCase =vocab_size
_UpperCamelCase =hidden_size
_UpperCamelCase =num_hidden_layers
_UpperCamelCase =num_attention_heads
_UpperCamelCase =intermediate_size
_UpperCamelCase =hidden_act
_UpperCamelCase =hidden_dropout_prob
_UpperCamelCase =attention_probs_dropout_prob
_UpperCamelCase =max_position_embeddings
_UpperCamelCase =type_vocab_size
_UpperCamelCase =type_sequence_label_size
_UpperCamelCase =initializer_range
_UpperCamelCase =num_labels
_UpperCamelCase =num_choices
_UpperCamelCase =scope
def UpperCamelCase__ ( self : Tuple ) -> List[Any]:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase =None
if self.use_input_mask:
_UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase =None
if self.use_token_type_ids:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase =None
_UpperCamelCase =None
_UpperCamelCase =None
if self.use_labels:
_UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : Tuple ) -> List[Any]:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Dict:
_UpperCamelCase =NystromformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
_UpperCamelCase =model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
_UpperCamelCase =model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> int:
_UpperCamelCase =NystromformerForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ) -> List[str]:
_UpperCamelCase =NystromformerForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> List[str]:
_UpperCamelCase =self.num_labels
_UpperCamelCase =NystromformerForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ) -> int:
_UpperCamelCase =self.num_labels
_UpperCamelCase =NystromformerForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> str:
_UpperCamelCase =self.num_choices
_UpperCamelCase =NystromformerForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCamelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase =model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCamelCase__ ( self : Any ) -> int:
_UpperCamelCase =NystromformerModelTester(self )
_UpperCamelCase =ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase__ ( self : Any ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : str ) -> List[Any]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase__ ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase =type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase__ ( self : Tuple ) -> Any:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ) -> str:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[int] ) -> Dict:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> Optional[int]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def UpperCamelCase__ ( self : Any ) -> Optional[int]:
_UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self : int ) -> List[str]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase =NystromformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : List[Any] ) -> int:
_UpperCamelCase =NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCamelCase =torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCamelCase =model(UpperCamelCase__ )[0]
_UpperCamelCase =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , UpperCamelCase__ )
_UpperCamelCase =torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self : Any ) -> List[Any]:
_UpperCamelCase ='''the [MASK] of Belgium is Brussels'''
_UpperCamelCase =AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCamelCase =NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCamelCase =tokenizer(UpperCamelCase__ , return_tensors='''pt''' )
with torch.no_grad():
_UpperCamelCase =model(encoding.input_ids ).logits
_UpperCamelCase =token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , '''capital''' )
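# The fill-mask check above, distilled: take the logits at the masked
# position, argmax over the vocabulary, and decode that id. A sketch with toy
# tensors (requires torch).
import torch


def predict_masked_token_sketch(token_logits: torch.Tensor, mask_position: int) -> int:
    return int(token_logits[:, mask_position, :].argmax(-1)[0])


_toy_logits = torch.zeros(1, 6, 10)
_toy_logits[0, 2, 7] = 1.0
assert predict_masked_token_sketch(_toy_logits, 2) == 7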
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCAmelCase_ = True
except (ImportError, AttributeError):
UpperCAmelCase_ = object
def UpperCamelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
'''simple docstring'''
pass
UpperCAmelCase_ = False
UpperCAmelCase_ = logging.get_logger('''transformers-cli/serving''')
def UpperCamelCase ( lowerCAmelCase_ ) -> int:
'''simple docstring'''
_A= pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCAmelCase_ , args.host , args.port , args.workers )
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : dict
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : List[str]
_SCREAMING_SNAKE_CASE : Optional[List[int]]
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : str
class lowerCAmelCase ( _a ):
_SCREAMING_SNAKE_CASE : Any
class lowerCAmelCase ( _a ):
@staticmethod
def a__ ( lowerCAmelCase__ ):
_A= parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=lowerCAmelCase__ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=lowerCAmelCase__ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=lowerCAmelCase__ , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=lowerCAmelCase__ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=lowerCAmelCase__ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=lowerCAmelCase__ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=lowerCAmelCase__ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=lowerCAmelCase__ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=lowerCAmelCase__ )
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_A= pipeline
_A= host
_A= port
_A= workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"Serving model over {host}:{port}" )
_A= FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=lowerCAmelCase__ , response_class=lowerCAmelCase__ , methods=['POST'] , ),
] , timeout=600 , )
def a__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def a__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def a__ ( self , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ):
try:
_A= self._pipeline.tokenizer.tokenize(lowerCAmelCase__ )
if return_ids:
_A= self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
return ServeTokenizeResult(tokens=lowerCAmelCase__ , tokens_ids=lowerCAmelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase__ )} )
def a__ ( self , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , lowerCAmelCase__ = Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) , ):
try:
_A= self._pipeline.tokenizer.decode(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return ServeDeTokenizeResult(model='' , text=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase__ )} )
async def a__ ( self , lowerCAmelCase__=Body(lowerCAmelCase__ , embed=lowerCAmelCase__ ) ):
# Check we don't have an empty string
if len(lowerCAmelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_A= self._pipeline(lowerCAmelCase__ )
return ServeForwardResult(output=lowerCAmelCase__ )
except Exception as e:
raise HTTPException(500 , {'error': str(lowerCAmelCase__ )} )
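# A minimal sketch of the serving pattern above (hypothetical route, kept
# commented out since it needs fastapi and a loaded pipeline): one POST route
# wraps the pipeline call and surfaces failures as HTTP 500s.
# from fastapi import FastAPI, HTTPException
#
# app = FastAPI()
#
# @app.post("/forward")
# async def forward(inputs: str):
#     try:
#         return {"output": nlp_pipeline(inputs)}  # nlp_pipeline: a pre-built pipeline
#     except Exception as e:
#         raise HTTPException(status_code=500, detail={"error": str(e)})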
from ..utils import DummyObject, requires_backends
class a ( metaclass=__lowerCamelCase ):
__lowerCAmelCase : Union[str, Any] = ["""torch""", """scipy"""]
def __init__( self :Any ,*__lowercase :List[Any] ,**__lowercase :Optional[Any] ):
requires_backends(self ,['''torch''', '''scipy'''] )
@classmethod
def __lowerCamelCase ( cls :Union[str, Any] ,*__lowercase :Any ,**__lowercase :Any ):
requires_backends(cls ,['''torch''', '''scipy'''] )
@classmethod
def __lowerCamelCase ( cls :List[Any] ,*__lowercase :List[Any] ,**__lowercase :Any ):
requires_backends(cls ,['''torch''', '''scipy'''] )
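# The dummy-object pattern above in miniature: when optional backends are
# missing, transformers exposes placeholders whose constructor raises an
# informative error instead of a bare NameError at import time. A simplified
# sketch, not the real DummyObject metaclass.
class _DummyScipyClassSketch:
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires the following backends: {', '.join(self._backends)}")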
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Apply Sherman-Morrison: `self` is treated as A^(-1).
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
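# Sherman-Morrison, stated (a reference sketch that assumes the `Matrix`
# class above): if A is invertible and 1 + v^T A^(-1) u != 0, then
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# `sherman_morrison` applies this with `self` playing the role of A^(-1).
# Numeric spot-check with A = I: (I + u v^T) times the result should give
# back the identity.
def _check_sherman_morrison() -> None:
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    product = (ainv + u * v.transpose()) * ainv.sherman_morrison(u, v)
    for r in range(3):
        for c in range(3):
            assert abs(product[r, c] - (1.0 if r == c else 0.0)) < 1e-9


_check_sherman_morrison()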
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Optional[Any]:
_lowerCAmelCase =()
for resnet, attn in zip(self.resnets , self.attentions ):
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A=True ) -> Optional[int]:
_lowerCAmelCase =()
for resnet in self.resnets:
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A , __A=True ) -> Any:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Any:
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> int:
# there is always at least one resnet
_lowerCAmelCase =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_lowerCAmelCase =[]
for _ in range(self.num_layers ):
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
def __call__( self , __A , __A , __A , __A=True ) -> int:
_lowerCAmelCase =self.resnets[0](__A , __A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
return hidden_states
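The down-, up- and mid-blocks above all follow the standard flax.linen pattern: dataclass-style fields, a setup() that builds submodules once, and a __call__ that threads hidden states through them. As a minimal, self-contained sketch of that pattern (a toy module for illustration, not the diffusers API):

import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyBlock(nn.Module):
    features: int  # dataclass-style hyperparameter, like the channel counts above

    def setup(self):
        self.dense = nn.Dense(self.features)  # built once, reused on every call

    def __call__(self, hidden_states, deterministic=True):
        return nn.relu(self.dense(hidden_states))

block = TinyBlock(features=8)
params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 4)))  # shapes inferred lazily
out = block.apply(params, jnp.ones((1, 4)))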
| 58 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # feeds into an nn.Sequential with Tanh, so two entries at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
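For reference, a hedged sketch of driving the converter above programmatically rather than from the shell; the entry-point name is taken from the __main__ guard (the def itself carries an obfuscated name in this dump), and both paths are placeholders:

from argparse import Namespace

# Placeholders: point these at a real GPTSAN TensorFlow checkpoint directory.
args = Namespace(tf_model_dir="./gptsan_tf_checkpoint", output="./gptsan_pytorch.pt")
convert_tf_gptsan_to_pt(args)  # name as used in the __main__ guard above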
| 58 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __A ( a_ : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(a_ )
SCREAMING_SNAKE_CASE : str = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __A ( a_ : int )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(a_ )
SCREAMING_SNAKE_CASE : Any = tf.cast(math.pi , x.dtype )
SCREAMING_SNAKE_CASE : Any = tf.cast(0.04_4715 , x.dtype )
SCREAMING_SNAKE_CASE : Tuple = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(a_ , 3 )) ))
return x * cdf
def __A ( a_ : Union[str, Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor(a_ )
return x * tf.tanh(tf.math.softplus(a_ ) )
def __A ( a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = tf.convert_to_tensor(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(0.04_4715 , x.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(0.79_7884_5608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __A ( a_ : Tuple )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __A ( a_ : Tuple )-> str:
'''simple docstring'''
return tf.clip_by_value(_gelu(a_ ) , -10 , 10 )
def __A ( a_ : Union[str, Any] , a_ : int=-1 )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = tf.split(a_ , 2 , axis=a_ )
return a * tf.math.sigmoid(a_ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def __A ( a_ : str )-> str:
'''simple docstring'''
return tf.keras.activations.gelu(a_ , approximate=a_ )
lowerCamelCase__ : Any = tf.keras.activations.gelu
lowerCamelCase__ : int = approximate_gelu_wrap
else:
lowerCamelCase__ : int = _gelu
lowerCamelCase__ : Any = _gelu_new
lowerCamelCase__ : List[Any] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def __A ( a_ : Optional[Any] )-> Tuple:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
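This module mirrors the TF activation registry in transformers; the snippet below is a hedged sketch of the public lookup, assuming the import path transformers.activations_tf.get_tf_activation matches your installed version:

import tensorflow as tf
from transformers.activations_tf import get_tf_activation  # assumed public path

act = get_tf_activation("gelu_new")  # raises KeyError for unknown names, as above
x = tf.constant([-1.0, 0.0, 1.0])
print(act(x))  # elementwise tanh-approximated GELU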
| 698 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
def __init__( self :List[str] , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :ScoreSdeVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :int , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 20_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Any = self.unet
SCREAMING_SNAKE_CASE : Dict = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
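A hedged end-to-end sketch of running a score-SDE (VE) pipeline like the one above; ScoreSdeVePipeline and the google/ncsnpp checkpoints ship with diffusers, but verify both names against your installed version:

import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(num_inference_steps=2000).images[0]  # 2000 steps is the default above
image.save("sde_ve_sample.png")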
| 698 | 1 |
def SCREAMING_SNAKE_CASE__ ( pointa: Tuple , pointb: Tuple ):
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Any=0 ):
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[column] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=float("""inf""" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
SCREAMING_SNAKE_CASE__ = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int=float("""inf""" ) ):
for i in range(min(6 , points_counts - 1 ) , UpperCamelCase__ ):
for j in range(max(0 , i - 6 ) , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
SCREAMING_SNAKE_CASE__ = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: Any ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(UpperCamelCase__ , UpperCamelCase__ )
# recursion
SCREAMING_SNAKE_CASE__ = points_counts // 2
SCREAMING_SNAKE_CASE__ = closest_pair_of_points_sqr(
UpperCamelCase__ , points_sorted_on_y[:mid] , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = closest_pair_of_points_sqr(
UpperCamelCase__ , points_sorted_on_y[mid:] , points_counts - mid )
SCREAMING_SNAKE_CASE__ = min(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = dis_between_closest_in_strip(
UpperCamelCase__ , len(UpperCamelCase__ ) , UpperCamelCase__ )
return min(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = column_based_sort(UpperCamelCase__ , column=0 )
SCREAMING_SNAKE_CASE__ = column_based_sort(UpperCamelCase__ , column=1 )
return (
closest_pair_of_points_sqr(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
) ** 0.5
if __name__ == "__main__":
_lowerCamelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 59 |
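A quick brute-force cross-check for the divide-and-conquer search above, O(n^2) but trivially correct; it assumes a working (deobfuscated) version of the helpers under the names used at the call sites:

from itertools import combinations

def brute_force(points):
    return min(
        ((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2) ** 0.5
        for pa, pb in combinations(points, 2)
    )

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
assert abs(brute_force(pts) - closest_pair_of_points(pts, len(pts))) < 1e-9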
import inspect
import unittest
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE__ = inspect.getmembers(__A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE__ = """k-diffusion"""
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE__ = """invisible-watermark"""
assert backend in deps, f'''{backend} is not in the deps table!'''
| 59 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
class _snake_case( _UpperCAmelCase ):
__snake_case: Optional[int] = ['''pixel_values''']
def __init__(self : int , a : Union[str, Any] = True , a : int = None , a : Dict = 0.9 , a : Union[str, Any] = PILImageResampling.BICUBIC , a : int = True , a : Optional[int] = None , a : List[str] = 1 / 2_55 , a : Tuple = True , a : str = True , a : Tuple = None , a : Any = None , **a : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**a )
A__ = size if size is not None else {'shortest_edge': 2_24}
A__ = get_size_dict(a , default_to_square=a )
A__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
A__ = get_size_dict(a , param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = crop_pct
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCamelCase (self : Optional[Any] , a : Optional[int] , a : int , a : Any = None , a : int = PILImageResampling.BICUBIC , a : List[Any] = None , **a : List[Any] , ) -> np.ndarray:
"""simple docstring"""
A__ = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
A__ = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
A__ = int(size['height'] / crop_pct )
else:
A__ = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(a ) )
A__ = get_resize_output_image_size(a , size=a , default_to_square=a )
else:
if "shortest_edge" in size:
A__ = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a )
elif "height" in size and "width" in size:
A__ = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(a ) )
return resize(a , size=a , resample=a , data_format=a , **a )
def _UpperCamelCase (self : int , a : Dict , a : Dict , a : List[Any] = None , **a : Dict , ) -> np.ndarray:
"""simple docstring"""
A__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain \'height\' and \'width\' as keys. Got {size.keys()}""" )
return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a )
def _UpperCamelCase (self : List[Any] , a : str , a : Union[str, Any] , a : Tuple = None , **a : Optional[int] , ) -> List[str]:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def _UpperCamelCase (self : Union[str, Any] , a : str , a : Tuple , a : Any , a : Optional[Any] = None , **a : Any , ) -> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def _UpperCamelCase (self : Tuple , a : List[str] , a : List[Any] = None , a : Dict = None , a : int = None , a : List[Any] = None , a : List[Any] = None , a : str = None , a : Union[str, Any] = None , a : List[Any] = None , a : Tuple = None , a : List[Any] = None , a : Tuple = None , a : Tuple = None , a : Any = ChannelDimension.FIRST , **a : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = crop_pct if crop_pct is not None else self.crop_pct
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(a , default_to_square=a )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(a , param_name='crop_size' )
A__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(a ) for image in images]
if do_resize:
A__ = [self.resize(image=a , size=a , crop_pct=a , resample=a ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
A__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
A__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
A__ = [to_channel_dimension_format(a , a ) for image in images]
A__ = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
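The crop_pct-aware resize logic above is the signature feature of the PoolFormer image processor in transformers; the identification is an inference from the code, so treat this usage sketch as hedged:

import numpy as np
from transformers import PoolFormerImageProcessor  # assumed public counterpart

processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)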
| 531 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'visual_bert'
def __init__( self , lowercase=30_522 , lowercase=768 , lowercase=512 , lowercase=12 , lowercase=12 , lowercase=3_072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=False , lowercase=True , lowercase=1 , lowercase=0 , lowercase=2 , **lowercase , ) -> int:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = visual_embedding_dim
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = bypass_transformer
lowerCAmelCase = special_visual_initialize
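A short sketch of instantiating this configuration through its public transformers counterpart and reading fields back; VisualBertConfig is the class the pretrained-config map above belongs to:

from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=512, hidden_size=768)
print(config.model_type)            # "visual_bert"
print(config.visual_embedding_dim)  # 512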
| 532 | 0 |
from __future__ import annotations
A_ = [True] * 1000001
A_ = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
A_ = False
i += 1
def __UpperCAmelCase ( UpperCAmelCase )-> bool:
"""simple docstring"""
return seive[n]
def __UpperCAmelCase ( UpperCAmelCase )-> bool:
"""simple docstring"""
return any(digit in '''02468''' for digit in str(UpperCAmelCase ) )
def __UpperCAmelCase ( UpperCAmelCase = 1000000 )-> list[int]:
"""simple docstring"""
lowercase = [2] # result already includes the number 2.
for num in range(3, limit + 1, 2 ):
if is_prime(UpperCAmelCase ) and not contains_an_even_digit(UpperCAmelCase ):
lowercase = str(UpperCAmelCase )
lowercase = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCAmelCase ) )]
if all(is_prime(UpperCAmelCase ) for i in list_nums ):
result.append(UpperCAmelCase )
return result
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
| 479 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowercase :
def __init__( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : int=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : Any=9 , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Dict=37 , __lowerCamelCase : Optional[Any]=8 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[Any]=0.002 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , ) -> Dict:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = encoder_seq_length
lowercase = decoder_seq_length
# For common tests
lowercase = self.decoder_seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = d_ff
lowercase = relative_attention_num_buckets
lowercase = dropout_rate
lowercase = initializer_factor
lowercase = eos_token_id
lowercase = pad_token_id
lowercase = decoder_start_token_id
lowercase = None
lowercase = decoder_layers
def __a ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def __a ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=None , ) -> Optional[int]:
'''simple docstring'''
if attention_mask is None:
lowercase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowercase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowercase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
lowercase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
lowercase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __a ( self : int ) -> Dict:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase = input_ids.clamp(self.pad_token_id + 1 )
lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowercase = self.get_config()
lowercase = config.num_attention_heads
lowercase = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, input_dict
def __a ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase ,lowercase = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __a ( self : str ) -> Any:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __a ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str , ) -> Union[str, Any]:
'''simple docstring'''
lowercase = UMTaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase = model(
input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , )
lowercase = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
lowercase = result.last_hidden_state
lowercase = result.past_key_values
lowercase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __a ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
lowercase = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval()
# first forward pass
lowercase = model(__lowerCamelCase , use_cache=__lowerCamelCase )
lowercase = model(__lowerCamelCase )
lowercase = model(__lowerCamelCase , use_cache=__lowerCamelCase )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 )
lowercase ,lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = model(__lowerCamelCase )['''last_hidden_state''']
lowercase = model(__lowerCamelCase , past_key_values=__lowerCamelCase )['''last_hidden_state''']
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
def __a ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any , ) -> str:
'''simple docstring'''
lowercase = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval()
lowercase = model(**__lowerCamelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() )
@require_torch
class __lowercase ( _A , _A , _A , unittest.TestCase ):
lowercase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = True
lowercase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase = [0.8, 0.9]
def __a ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __a ( self : Any ) -> Any:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=__lowerCamelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __a ( self : Dict ) -> Any:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase )
def __a ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = config_and_inputs[0]
lowercase = UMTaForConditionalGeneration(__lowerCamelCase ).eval()
model.to(__lowerCamelCase )
lowercase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__lowerCamelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
}
for attn_name, (name, mask) in zip(__lowerCamelCase , head_masking.items() ):
lowercase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__lowerCamelCase )
lowercase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , **__lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __a ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __a ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__lowerCamelCase ).to(__lowerCamelCase )
lowercase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__lowerCamelCase , legacy=__lowerCamelCase )
lowercase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
lowercase = tokenizer(__lowerCamelCase , return_tensors='''pt''' , padding=__lowerCamelCase ).input_ids
# fmt: off
lowercase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__lowerCamelCase , __lowerCamelCase )
lowercase = model.generate(input_ids.to(__lowerCamelCase ) )
lowercase = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
lowercase = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
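Outside the test harness, plain UMT5 inference looks like the hedged sketch below; the class and checkpoint names (UMT5ForConditionalGeneration, google/umt5-small) match current transformers but should be verified against your version:

from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))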
| 479 | 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ ,unittest.TestCase ):
"""simple docstring"""
a_ = CodeGenTokenizer
a_ = CodeGenTokenizerFast
a_ = True
a_ = {"add_prefix_space": True}
a_ = False
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
a_ : List[Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
a_ : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
a_ : Dict = {"""unk_token""": """<unk>"""}
a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
a_ : str = """lower newer"""
a_ : List[Any] = """lower newer"""
return input_text, output_text
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : Any = """lower newer"""
a_ : int = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
a_ : Dict = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Union[str, Any] = tokens + [tokenizer.unk_token]
a_ : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ : int = self.get_tokenizer()
a_ : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
a_ : List[Any] = """lower newer"""
# Testing tokenization
a_ : List[str] = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
a_ : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids without special tokens
a_ : Optional[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
a_ : Any = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing conversion to ids with special tokens
a_ : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ )
a_ : str = tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
a_ : str = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing the unknown token
a_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
a_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self , lowerCAmelCase_=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
# Simple input
a_ : Dict = """This is a simple input"""
a_ : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
a_ : Any = ("""This is a simple input""", """This is a pair""")
a_ : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" , )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
a_ : List[Any] = """This is a simple input"""
a_ : Optional[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
a_ : List[str] = ("""This is a simple input""", """This is a pair""")
a_ : Tuple = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
a_ : Optional[Any] = tokenizer.pad_token_id
a_ : Union[str, Any] = tokenizer(lowerCAmelCase_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
a_ : Optional[int] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="""np""" )
a_ : List[str] = tokenizer(*lowerCAmelCase_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
a_ : Dict = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = """$$$"""
a_ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )
a_ : int = """This is a simple input"""
a_ : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
a_ : Optional[int] = tokenizer.bos_token_id
a_ : int = tokenizer(lowerCAmelCase_ )
a_ : Optional[int] = tokenizer(lowerCAmelCase_ )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a_ : Tuple = tokenizer.decode(out_s.input_ids )
a_ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
a_ : int = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
a_ : Dict = """\nif len_a > len_b: result = a\nelse: result = b"""
a_ : Optional[Any] = tokenizer.encode(lowerCAmelCase_ )
a_ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
a_ : str = tokenizer.decode(lowerCAmelCase_ , truncate_before_pattern=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
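The truncate_before_pattern behaviour exercised in the slow test above is part of the public CodeGen tokenizer API; a hedged sketch of using it directly on a decode call:

from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer.encode("if a > b:\n    result = a\nelse:\n    result = b\n\n\n\n# noise")
# Decoding is cut just before the first regex match ("^#" or a triple newline here).
print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))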
| 577 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _snake_case ( A_ : List[str] ):
"""simple docstring"""
a_ , a_ : Optional[int] = image.size
a_ , a_ : Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a_ : List[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
a_ : Tuple = np.array(A_ ).astype(np.floataa ) / 255.0
a_ : Tuple = image[None].transpose(0 , 3 , 1 , 2 )
a_ : Tuple = torch.from_numpy(A_ )
return 2.0 * image - 1.0
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ):
'''simple docstring'''
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
a_ : str = 1
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
a_ : Tuple = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase_ )}''' )
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
a_ : Any = preprocess(lowerCAmelCase_ )
a_ , a_ : Dict = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
a_ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
a_ : str = next(self.unet.parameters() ).dtype
a_ : Dict = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
a_ : Optional[Any] = image.to(device=self.device , dtype=lowerCAmelCase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCAmelCase_ , device=self.device )
a_ : Dict = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
a_ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a_ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a_ : int = {}
if accepts_eta:
a_ : Union[str, Any] = eta
for t in self.progress_bar(lowerCAmelCase_ ):
# concat latents and low resolution image in the channel dimension.
a_ : int = torch.cat([latents, image] , dim=1 )
a_ : Dict = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
a_ : Optional[Any] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
a_ : Tuple = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample
# decode the image latents with the VQVAE
a_ : List[str] = self.vqvae.decode(lowerCAmelCase_ ).sample
a_ : Tuple = torch.clamp(lowerCAmelCase_ , -1.0 , 1.0 )
a_ : Optional[int] = image / 2 + 0.5
a_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a_ : str = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
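A hedged end-to-end sketch for a latent-diffusion upscaler like the pipeline above; LDMSuperResolutionPipeline and the CompVis/ldm-super-resolution-4x-openimages checkpoint are its usual public pairing in diffusers:

import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
low_res = Image.open("input_128x128.png").convert("RGB")  # placeholder input path
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled_4x.png")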
| 577 | 1 |
"""Convert MobileNetV1 checkpoints from TensorFlow to PyTorch."""

import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # verify the conversion on an image of two cats from COCO
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
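
A hypothetical direct call of the converter above, bypassing argparse; the checkpoint and output paths are assumptions for illustration:

convert_mobilenet_v1_checkpoint(
    model_name="mobilenet_v1_1.0_224",
    checkpoint_path="./mobilenet_v1_1.0_224.ckpt",  # hypothetical local TF checkpoint
    pytorch_dump_folder_path="./mobilenet_v1_1.0_224_pt",
    push_to_hub=False,
)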
| 718 |
"""PyTorch BERT model with Patience-based Early Exit (PABEE)."""

import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # run a single encoder layer instead of the whole stack
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # training: run every layer and collect the logits of every internal classifier
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # patience-based early exit: stop once `patience` consecutive classifiers agree
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # deeper classifiers get proportionally larger weight
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
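
A sketch of patience-based early exit at inference time with the classes above; the bert-base-uncased checkpoint, patience value, and example sentence are illustrative assumptions (the per-layer classifier heads are untrained here, so the prediction itself is meaningless — the point is the early-exit mechanics):

from transformers import BertConfig, BertTokenizer

config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", config=config).eval()
model.bert.set_patience(3)  # exit once 3 consecutive internal classifiers agree
model.bert.reset_stats()

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = tokenizer("a deeply moving film", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs)[0]
model.bert.log_stats()  # prints the average number of layers actually executed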
| 474 | 0 |
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of the words in a string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 481 |
"""Convert GroupViT checkpoints from the original repository (https://github.com/NVlabs/GroupViT)."""

import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    # verify the conversion on an image of two cats from COCO
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
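
A minimal zero-shot sketch with the converted weights; the local dump folder path is an assumption, and the processor is reused from the CLIP checkpoint exactly as in the verification step above:

model = GroupViTModel.from_pretrained("./groupvit-gcc-yfcc")  # hypothetical dump folder
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=prepare_img(), padding=True, return_tensors="pt"
)
with torch.no_grad():
    probs = model(**inputs).logits_per_image.softmax(dim=1)
print(probs)  # the "cat" entry should dominate for the COCO cats image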
| 481 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Utility class holding one conversation and its history."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
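
A minimal usage sketch for the pipeline above; the DialoGPT checkpoint is an assumption — any model with a conversational head works:

from transformers import pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")

conversation = Conversation("What's a good first project for learning Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input("And after that?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])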
| 716 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# assumption: the garbled `lowercase_ = False` global is taken to be the usual
# TF32 switch found in diffusers test modules, which keeps the reference slices reproducible
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 230 | 0 |